pgindent run for 9.6

Robert Haas 2016-06-09 18:02:36 -04:00
parent 9164deea2f
commit 4bc424b968
252 changed files with 2670 additions and 2558 deletions


@@ -165,16 +165,16 @@ _PG_init(void)
 DefineCustomRealVariable("auto_explain.sample_rate",
 "Fraction of queries to process.",
 NULL,
 &auto_explain_sample_rate,
 1.0,
 0.0,
 1.0,
 PGC_SUSET,
 0,
 NULL,
 NULL,
 NULL);
 EmitWarningsOnPlaceholders("auto_explain");
@@ -209,12 +209,12 @@ static void
 explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
 {
 /*
-* For rate sampling, randomly choose top-level statement. Either
-* all nested statements will be explained or none will.
+* For rate sampling, randomly choose top-level statement. Either all
+* nested statements will be explained or none will.
 */
 if (auto_explain_log_min_duration >= 0 && nesting_level == 0)
 current_query_sampled = (random() < auto_explain_sample_rate *
 MAX_RANDOM_VALUE);
 if (auto_explain_enabled() && current_query_sampled)
 {


@@ -33,11 +33,11 @@ PG_MODULE_MAGIC;
 typedef struct
 {
 BloomState blstate; /* bloom index state */
-MemoryContext tmpCtx; /* temporary memory context reset after
-* each tuple */
+MemoryContext tmpCtx; /* temporary memory context reset after each
+* tuple */
 char data[BLCKSZ]; /* cached page */
 int64 count; /* number of tuples in cached page */
 } BloomBuildState;
 /*
 * Flush page cached in BloomBuildState.
@@ -140,8 +140,8 @@ blbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 bloomBuildCallback, (void *) &buildstate);
 /*
-* There are could be some items in cached page. Flush this page
-* if needed.
+* There are could be some items in cached page. Flush this page if
+* needed.
 */
 if (buildstate.count > 0)
 flushCachedPage(index, &buildstate);


@@ -31,14 +31,13 @@
 /* Opaque for bloom pages */
 typedef struct BloomPageOpaqueData
 {
 OffsetNumber maxoff; /* number of index tuples on page */
 uint16 flags; /* see bit definitions below */
-uint16 unused; /* placeholder to force maxaligning of size
-* of BloomPageOpaqueData and to place
-* bloom_page_id exactly at the end of page
-*/
+uint16 unused; /* placeholder to force maxaligning of size of
+* BloomPageOpaqueData and to place
+* bloom_page_id exactly at the end of page */
 uint16 bloom_page_id; /* for identification of BLOOM indexes */
 } BloomPageOpaqueData;
 typedef BloomPageOpaqueData *BloomPageOpaque;
@@ -102,9 +101,9 @@ typedef struct BloomOptions
 {
 int32 vl_len_; /* varlena header (do not touch directly!) */
 int bloomLength; /* length of signature in words (not bits!) */
-int bitSize[INDEX_MAX_KEYS]; /* # of bits generated for each
-* index key */
+int bitSize[INDEX_MAX_KEYS]; /* # of bits generated for
+* each index key */
 } BloomOptions;
 /*
 * FreeBlockNumberArray - array of block numbers sized so that metadata fill
@@ -125,7 +124,7 @@ typedef struct BloomMetaPageData
 uint16 nEnd;
 BloomOptions opts;
 FreeBlockNumberArray notFullPage;
 } BloomMetaPageData;
 /* Magic number to distinguish bloom pages among anothers */
 #define BLOOM_MAGICK_NUMBER (0xDBAC0DED)
@@ -146,7 +145,7 @@ typedef struct BloomState
 * precompute it
 */
 Size sizeOfBloomTuple;
 } BloomState;
 #define BloomPageGetFreeSpace(state, page) \
 (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) \
@@ -160,30 +159,30 @@ typedef struct BloomTuple
 {
 ItemPointerData heapPtr;
 BloomSignatureWord sign[FLEXIBLE_ARRAY_MEMBER];
 } BloomTuple;
 #define BLOOMTUPLEHDRSZ offsetof(BloomTuple, sign)
 /* Opaque data structure for bloom index scan */
 typedef struct BloomScanOpaqueData
 {
 BloomSignatureWord *sign; /* Scan signature */
 BloomState state;
 } BloomScanOpaqueData;
 typedef BloomScanOpaqueData *BloomScanOpaque;
 /* blutils.c */
 extern void _PG_init(void);
 extern Datum blhandler(PG_FUNCTION_ARGS);
-extern void initBloomState(BloomState * state, Relation index);
+extern void initBloomState(BloomState *state, Relation index);
 extern void BloomFillMetapage(Relation index, Page metaPage);
 extern void BloomInitMetapage(Relation index);
 extern void BloomInitPage(Page page, uint16 flags);
 extern Buffer BloomNewBuffer(Relation index);
-extern void signValue(BloomState * state, BloomSignatureWord * sign, Datum value, int attno);
-extern BloomTuple *BloomFormTuple(BloomState * state, ItemPointer iptr, Datum *values, bool *isnull);
-extern bool BloomPageAddItem(BloomState * state, Page page, BloomTuple * tuple);
+extern void signValue(BloomState *state, BloomSignatureWord *sign, Datum value, int attno);
+extern BloomTuple *BloomFormTuple(BloomState *state, ItemPointer iptr, Datum *values, bool *isnull);
+extern bool BloomPageAddItem(BloomState *state, Page page, BloomTuple *tuple);
 /* blvalidate.c */
 extern bool blvalidate(Oid opclassoid);


@@ -37,6 +37,7 @@ PG_FUNCTION_INFO_V1(blhandler);
 /* Kind of relation options for bloom index */
 static relopt_kind bl_relopt_kind;
 /* parse table for fillRelOptions */
 static relopt_parse_elt bl_relopt_tab[INDEX_MAX_KEYS + 1];
@@ -215,7 +216,9 @@ myRand(void)
 * October 1988, p. 1195.
 *----------
 */
-int32 hi, lo, x;
+int32 hi,
+lo,
+x;
 /* Must be in [1, 0x7ffffffe] range at this point. */
 hi = next / 127773;


@@ -78,7 +78,7 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 /* Iterate over the tuples */
 itup = itupPtr = BloomPageGetTuple(&state, page, FirstOffsetNumber);
 itupEnd = BloomPageGetTuple(&state, page,
 OffsetNumberNext(BloomPageGetMaxOffset(page)));
 while (itup < itupEnd)
 {
 /* Do we have to delete this tuple? */
@@ -106,11 +106,11 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 }
 Assert(itupPtr == BloomPageGetTuple(&state, page,
 OffsetNumberNext(BloomPageGetMaxOffset(page))));
 /*
-* Add page to notFullPage list if we will not mark page as deleted and
-* there is a free space on it
+* Add page to notFullPage list if we will not mark page as deleted
+* and there is a free space on it
 */
 if (BloomPageGetMaxOffset(page) != 0 &&
 BloomPageGetFreeSpace(&state, page) > state.sizeOfBloomTuple &&


@@ -132,7 +132,7 @@ static bool fileAnalyzeForeignTable(Relation relation,
 AcquireSampleRowsFunc *func,
 BlockNumber *totalpages);
 static bool fileIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel,
 RangeTblEntry *rte);
 /*
 * Helper functions
@@ -767,12 +767,12 @@ fileAnalyzeForeignTable(Relation relation,
 /*
 * fileIsForeignScanParallelSafe
 * Reading a file in a parallel worker should work just the same as
 * reading it in the leader, so mark scans safe.
 */
 static bool
 fileIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel,
 RangeTblEntry *rte)
 {
 return true;
 }


@@ -444,9 +444,9 @@ ean2ISBN(char *isn)
 unsigned check;
 /*
-* The number should come in this format: 978-0-000-00000-0
-* or may be an ISBN-13 number, 979-..., which does not have a short
-* representation. Do the short output version if possible.
+* The number should come in this format: 978-0-000-00000-0 or may be an
+* ISBN-13 number, 979-..., which does not have a short representation. Do
+* the short output version if possible.
 */
 if (strncmp("978-", isn, 4) == 0)
 {


@@ -82,7 +82,7 @@ text_to_bits(char *str, int len)
 else
 ereport(ERROR,
 (errcode(ERRCODE_DATA_CORRUPTED),
 errmsg("illegal character '%c' in t_bits string", str[off])));
 if (off % 8 == 7)
 bits[off / 8] = byte;
@@ -192,9 +192,9 @@ heap_page_items(PG_FUNCTION_ARGS)
 lp_offset == MAXALIGN(lp_offset) &&
 lp_offset + lp_len <= raw_page_size)
 {
 HeapTupleHeader tuphdr;
 bytea *tuple_data_bytea;
 int tuple_data_len;
 /* Extract information from the tuple header */
@@ -214,7 +214,7 @@ heap_page_items(PG_FUNCTION_ARGS)
 tuple_data_bytea = (bytea *) palloc(tuple_data_len + VARHDRSZ);
 SET_VARSIZE(tuple_data_bytea, tuple_data_len + VARHDRSZ);
 memcpy(VARDATA(tuple_data_bytea), (char *) tuphdr + tuphdr->t_hoff,
 tuple_data_len);
 values[13] = PointerGetDatum(tuple_data_bytea);
 /*
@@ -284,16 +284,16 @@
 */
 static Datum
 tuple_data_split_internal(Oid relid, char *tupdata,
 uint16 tupdata_len, uint16 t_infomask,
 uint16 t_infomask2, bits8 *t_bits,
 bool do_detoast)
 {
 ArrayBuildState *raw_attrs;
 int nattrs;
 int i;
 int off = 0;
 Relation rel;
 TupleDesc tupdesc;
 /* Get tuple descriptor from relation OID */
 rel = relation_open(relid, NoLock);
@@ -310,30 +310,31 @@ tuple_data_split_internal(Oid relid, char *tupdata,
 for (i = 0; i < nattrs; i++)
 {
 Form_pg_attribute attr;
 bool is_null;
 bytea *attr_data = NULL;
 attr = tupdesc->attrs[i];
 is_null = (t_infomask & HEAP_HASNULL) && att_isnull(i, t_bits);
 /*
-* Tuple header can specify less attributes than tuple descriptor
-* as ALTER TABLE ADD COLUMN without DEFAULT keyword does not
-* actually change tuples in pages, so attributes with numbers greater
-* than (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL.
+* Tuple header can specify less attributes than tuple descriptor as
+* ALTER TABLE ADD COLUMN without DEFAULT keyword does not actually
+* change tuples in pages, so attributes with numbers greater than
+* (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL.
 */
 if (i >= (t_infomask2 & HEAP_NATTS_MASK))
 is_null = true;
 if (!is_null)
 {
 int len;
 if (attr->attlen == -1)
 {
 off = att_align_pointer(off, tupdesc->attrs[i]->attalign, -1,
 tupdata + off);
 /*
 * As VARSIZE_ANY throws an exception if it can't properly
 * detect the type of external storage in macros VARTAG_SIZE,
@@ -343,8 +344,8 @@ tuple_data_split_internal(Oid relid, char *tupdata,
 !VARATT_IS_EXTERNAL_ONDISK(tupdata + off) &&
 !VARATT_IS_EXTERNAL_INDIRECT(tupdata + off))
 ereport(ERROR,
 (errcode(ERRCODE_DATA_CORRUPTED),
 errmsg("first byte of varlena attribute is incorrect for attribute %d", i)));
 len = VARSIZE_ANY(tupdata + off);
 }
@@ -381,7 +382,7 @@ tuple_data_split_internal(Oid relid, char *tupdata,
 if (tupdata_len != off)
 ereport(ERROR,
 (errcode(ERRCODE_DATA_CORRUPTED),
 errmsg("end of tuple reached without looking at all its data")));
 return makeArrayResult(raw_attrs, CurrentMemoryContext);
 }
@@ -397,14 +398,14 @@ PG_FUNCTION_INFO_V1(tuple_data_split);
 Datum
 tuple_data_split(PG_FUNCTION_ARGS)
 {
 Oid relid;
 bytea *raw_data;
 uint16 t_infomask;
 uint16 t_infomask2;
 char *t_bits_str;
 bool do_detoast = false;
 bits8 *t_bits = NULL;
 Datum res;
 relid = PG_GETARG_OID(0);
 raw_data = PG_ARGISNULL(1) ? NULL : PG_GETARG_BYTEA_P(1);
@@ -430,8 +431,8 @@ tuple_data_split(PG_FUNCTION_ARGS)
 */
 if (t_infomask & HEAP_HASNULL)
 {
 int bits_str_len;
 int bits_len;
 bits_len = (t_infomask2 & HEAP_NATTS_MASK) / 8 + 1;
 if (!t_bits_str)


@@ -265,13 +265,13 @@ gin_trgm_consistent(PG_FUNCTION_ARGS)
 Datum
 gin_trgm_triconsistent(PG_FUNCTION_ARGS)
 {
 GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0);
 StrategyNumber strategy = PG_GETARG_UINT16(1);
 /* text *query = PG_GETARG_TEXT_P(2); */
 int32 nkeys = PG_GETARG_INT32(3);
 Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4);
 GinTernaryValue res = GIN_MAYBE;
 int32 i,
 ntrue;
 bool *boolcheck;
@@ -293,11 +293,12 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS)
 }
 /*
-* See comment in gin_trgm_consistent() about * upper bound formula
+* See comment in gin_trgm_consistent() about * upper bound
+* formula
 */
 res = (nkeys == 0)
 ? GIN_FALSE : (((((float4) ntrue) / ((float4) nkeys)) >= nlimit)
 ? GIN_MAYBE : GIN_FALSE);
 break;
 case ILikeStrategyNumber:
 #ifndef IGNORECASE
@@ -330,9 +331,9 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS)
 else
 {
 /*
-* As trigramsMatchGraph implements a monotonic boolean function,
-* promoting all GIN_MAYBE keys to GIN_TRUE will give a
-* conservative result.
+* As trigramsMatchGraph implements a monotonic boolean
+* function, promoting all GIN_MAYBE keys to GIN_TRUE will
+* give a conservative result.
 */
 boolcheck = (bool *) palloc(sizeof(bool) * nkeys);
 for (i = 0; i < nkeys; i++)
@@ -345,7 +346,7 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS)
 break;
 default:
 elog(ERROR, "unrecognized strategy number: %d", strategy);
 res = GIN_FALSE; /* keep compiler quiet */
 break;
 }


@@ -296,6 +296,7 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
 if (GIST_LEAF(entry))
 { /* all leafs contains orig trgm */
 /*
 * Prevent gcc optimizing the tmpsml variable using volatile
 * keyword. Otherwise comparison of nlimit and tmpsml may give
@@ -476,12 +477,14 @@ gtrgm_distance(PG_FUNCTION_ARGS)
 *recheck = strategy == WordDistanceStrategyNumber;
 if (GIST_LEAF(entry))
 { /* all leafs contains orig trgm */
 /*
 * Prevent gcc optimizing the sml variable using volatile
 * keyword. Otherwise res can differ from the
 * word_similarity_dist_op() function.
 */
 float4 volatile sml = cnt_sml(qtrg, key, *recheck);
 res = 1.0 - sml;
 }
 else if (ISALLTRUE(key))


@@ -16,8 +16,8 @@
 PG_MODULE_MAGIC;
 /* GUC variables */
 double similarity_threshold = 0.3f;
 double word_similarity_threshold = 0.6f;
 void _PG_init(void);
@@ -36,8 +36,8 @@ PG_FUNCTION_INFO_V1(word_similarity_dist_commutator_op);
 /* Trigram with position */
 typedef struct
 {
 trgm trg;
 int index;
 } pos_trgm;
 /*
@@ -48,29 +48,29 @@ _PG_init(void)
 {
 /* Define custom GUC variables. */
 DefineCustomRealVariable("pg_trgm.similarity_threshold",
 "Sets the threshold used by the %% operator.",
 "Valid range is 0.0 .. 1.0.",
 &similarity_threshold,
 0.3,
 0.0,
 1.0,
 PGC_USERSET,
 0,
 NULL,
 NULL,
 NULL);
 DefineCustomRealVariable("pg_trgm.word_similarity_threshold",
 "Sets the threshold used by the <%% operator.",
 "Valid range is 0.0 .. 1.0.",
 &word_similarity_threshold,
 0.6,
 0.0,
 1.0,
 PGC_USERSET,
 0,
 NULL,
 NULL,
 NULL);
 }
 /*
@@ -352,9 +352,9 @@ generate_trgm(char *str, int slen)
 * Make array of positional trigrams from two trigram arrays trg1 and trg2.
 *
 * trg1: trigram array of search pattern, of length len1. trg1 is required
 * word which positions don't matter and replaced with -1.
 * trg2: trigram array of text, of length len2. trg2 is haystack where we
 * search and have to store its positions.
 *
 * Returns concatenated trigram array.
 */
@@ -362,7 +362,8 @@ static pos_trgm *
 make_positional_trgm(trgm *trg1, int len1, trgm *trg2, int len2)
 {
 pos_trgm *result;
-int i, len = len1 + len2;
+int i,
+len = len1 + len2;
 result = (pos_trgm *) palloc(sizeof(pos_trgm) * len);
@@ -387,9 +388,9 @@ make_positional_trgm(trgm *trg1, int len1, trgm *trg2, int len2)
 static int
 comp_ptrgm(const void *v1, const void *v2)
 {
-const pos_trgm *p1 = (const pos_trgm *)v1;
-const pos_trgm *p2 = (const pos_trgm *)v2;
+const pos_trgm *p1 = (const pos_trgm *) v1;
+const pos_trgm *p2 = (const pos_trgm *) v2;
 int cmp;
 cmp = CMPTRGM(p1->trg, p2->trg);
 if (cmp != 0)
@@ -413,7 +414,7 @@ comp_ptrgm(const void *v1, const void *v2)
 * len2: length of array "trg2" and array "trg2indexes".
 * len: length of the array "found".
 * check_only: if true then only check existaince of similar search pattern in
 * text.
 *
 * Returns word similarity.
 */
@@ -441,7 +442,7 @@ iterate_word_similarity(int *trg2indexes,
 for (i = 0; i < len2; i++)
 {
 /* Get index of next trigram */
 int trgindex = trg2indexes[i];
 /* Update last position of this trigram */
 if (lower >= 0 || found[trgindex])
@@ -458,10 +459,10 @@ iterate_word_similarity(int *trg2indexes,
 /* Adjust lower bound if this trigram is present in required substing */
 if (found[trgindex])
 {
 int prev_lower,
 tmp_ulen2,
 tmp_lower,
 tmp_count;
 upper = i;
 if (lower == -1)
@@ -478,8 +479,8 @@ iterate_word_similarity(int *trg2indexes,
 prev_lower = lower;
 for (tmp_lower = lower; tmp_lower <= upper; tmp_lower++)
 {
 float smlr_tmp = CALCSML(tmp_count, ulen1, tmp_ulen2);
 int tmp_trgindex;
 if (smlr_tmp > smlr_cur)
 {
@@ -488,10 +489,11 @@ iterate_word_similarity(int *trg2indexes,
 lower = tmp_lower;
 count = tmp_count;
 }
 /*
 * if we only check that word similarity is greater than
-* pg_trgm.word_similarity_threshold we do not need to calculate
-* a maximum similarity.
+* pg_trgm.word_similarity_threshold we do not need to
+* calculate a maximum similarity.
 */
 if (check_only && smlr_cur >= word_similarity_threshold)
 break;
@@ -506,6 +508,7 @@ iterate_word_similarity(int *trg2indexes,
 }
 smlr_max = Max(smlr_max, smlr_cur);
 /*
 * if we only check that word similarity is greater than
 * pg_trgm.word_similarity_threshold we do not need to calculate a
@@ -516,7 +519,8 @@ iterate_word_similarity(int *trg2indexes,
 for (tmp_lower = prev_lower; tmp_lower < lower; tmp_lower++)
 {
 int tmp_trgindex;
 tmp_trgindex = trg2indexes[tmp_lower];
 if (lastpos[tmp_trgindex] == tmp_lower)
 lastpos[tmp_trgindex] = -1;
@@ -544,13 +548,13 @@ iterate_word_similarity(int *trg2indexes,
 * str1: search pattern string, of length slen1 bytes.
 * str2: text in which we are looking for a word, of length slen2 bytes.
 * check_only: if true then only check existaince of similar search pattern in
 * text.
 *
 * Returns word similarity.
 */
 static float4
 calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
 bool check_only)
 {
 bool *found;
 pos_trgm *ptrg;
@@ -568,8 +572,8 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
 protect_out_of_mem(slen1 + slen2);
 /* Make positional trigrams */
-trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) * 3);
-trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) * 3);
+trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) *3);
+trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) *3);
 len1 = generate_trgm_only(trg1, str1, slen1);
 len2 = generate_trgm_only(trg2, str2, slen2);
@@ -594,7 +598,8 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
 {
 if (i > 0)
 {
 int cmp = CMPTRGM(ptrg[i - 1].trg, ptrg[i].trg);
 if (cmp != 0)
 {
 if (found[j])
@@ -617,7 +622,7 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
 /* Run iterative procedure to find maximum similarity with word */
 result = iterate_word_similarity(trg2indexes, found, ulen1, len2, len,
 check_only);
 pfree(trg2indexes);
 pfree(found);
@@ -1075,8 +1080,8 @@ word_similarity(PG_FUNCTION_ARGS)
 float4 res;
 res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
 VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
 false);
 PG_FREE_IF_COPY(in1, 0);
 PG_FREE_IF_COPY(in2, 1);
@@ -1111,8 +1116,8 @@ word_similarity_op(PG_FUNCTION_ARGS)
 float4 res;
 res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
 VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
 true);
 PG_FREE_IF_COPY(in1, 0);
 PG_FREE_IF_COPY(in2, 1);
@@ -1127,8 +1132,8 @@ word_similarity_commutator_op(PG_FUNCTION_ARGS)
 float4 res;
 res = calc_word_similarity(VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
 VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
 true);
 PG_FREE_IF_COPY(in1, 0);
 PG_FREE_IF_COPY(in2, 1);
@@ -1143,8 +1148,8 @@ word_similarity_dist_op(PG_FUNCTION_ARGS)
 float4 res;
 res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
 VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
 false);
 PG_FREE_IF_COPY(in1, 0);
 PG_FREE_IF_COPY(in2, 1);
@@ -1159,8 +1164,8 @@ word_similarity_dist_commutator_op(PG_FUNCTION_ARGS)
 float4 res;
 res = calc_word_similarity(VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
 VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
 false);
 PG_FREE_IF_COPY(in1, 0);
 PG_FREE_IF_COPY(in2, 1);


@@ -20,8 +20,8 @@ PG_MODULE_MAGIC;
 typedef struct vbits
 {
 BlockNumber next;
 BlockNumber count;
 uint8 bits[FLEXIBLE_ARRAY_MEMBER];
 } vbits;
@@ -129,7 +129,7 @@ pg_visibility_map_rel(PG_FUNCTION_ARGS)
 if (SRF_IS_FIRSTCALL())
 {
 Oid relid = PG_GETARG_OID(0);
 MemoryContext oldcontext;
 funcctx = SRF_FIRSTCALL_INIT();
 oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -173,7 +173,7 @@ pg_visibility_rel(PG_FUNCTION_ARGS)
 if (SRF_IS_FIRSTCALL())
 {
 Oid relid = PG_GETARG_OID(0);
 MemoryContext oldcontext;
 funcctx = SRF_FIRSTCALL_INIT();
 oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -214,8 +214,8 @@ pg_visibility_map_summary(PG_FUNCTION_ARGS)
 {
 Oid relid = PG_GETARG_OID(0);
 Relation rel;
 BlockNumber nblocks;
 BlockNumber blkno;
 Buffer vmbuffer = InvalidBuffer;
 int64 all_visible = 0;
 int64 all_frozen = 0;
@@ -292,16 +292,16 @@ static vbits *
 collect_visibility_data(Oid relid, bool include_pd)
 {
 Relation rel;
 BlockNumber nblocks;
 vbits *info;
 BlockNumber blkno;
 Buffer vmbuffer = InvalidBuffer;
 BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
 rel = relation_open(relid, AccessShareLock);
 nblocks = RelationGetNumberOfBlocks(rel);
-info = palloc0(offsetof(vbits, bits) + nblocks);
+info = palloc0(offsetof(vbits, bits) +nblocks);
 info->next = 0;
 info->count = nblocks;
@@ -320,8 +320,8 @@ collect_visibility_data(Oid relid, bool include_pd)
 info->bits[blkno] |= (1 << 1);
 /*
-* Page-level data requires reading every block, so only get it if
-* the caller needs it. Use a buffer access strategy, too, to prevent
+* Page-level data requires reading every block, so only get it if the
+* caller needs it. Use a buffer access strategy, too, to prevent
 * cache-trashing.
 */
 if (include_pd)


@@ -124,7 +124,7 @@ struct PGP_S2K
 uint8 mode;
 uint8 digest_algo;
 uint8 salt[8];
 uint8 iter; /* encoded (one-octet) count */
 /* calculated: */
 uint8 key[PGP_MAX_KEY];
 uint8 key_len;


@@ -486,11 +486,11 @@ pgfdw_get_result(PGconn *conn, const char *query)
 for (;;)
 {
 PGresult *res;
 while (PQisBusy(conn))
 {
 int wc;
 /* Sleep until there's something to do */
 wc = WaitLatchOrSocket(MyLatch,
@@ -675,9 +675,9 @@ pgfdw_xact_callback(XactEvent event, void *arg)
 /*
 * If a command has been submitted to the remote server by
 * using an asynchronous execution function, the command
-* might not have yet completed. Check to see if a command
-* is still being processed by the remote server, and if so,
-* request cancellation of the command.
+* might not have yet completed. Check to see if a
+* command is still being processed by the remote server,
+* and if so, request cancellation of the command.
 */
 if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
 {
@@ -689,8 +689,8 @@ pgfdw_xact_callback(XactEvent event, void *arg)
 if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
 ereport(WARNING,
 (errcode(ERRCODE_CONNECTION_FAILURE),
 errmsg("could not send cancel request: %s",
 errbuf)));
 PQfreeCancel(cancel);
 }
 }
@@ -798,11 +798,11 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
 entry->have_error = true;
 /*
-* If a command has been submitted to the remote server by using an
-* asynchronous execution function, the command might not have yet
-* completed. Check to see if a command is still being processed by
-* the remote server, and if so, request cancellation of the
-* command.
+* If a command has been submitted to the remote server by using
+* an asynchronous execution function, the command might not have
+* yet completed. Check to see if a command is still being
+* processed by the remote server, and if so, request cancellation
+* of the command.
 */
 if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
 {


@@ -1583,10 +1583,10 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
 /*
 * All other system attributes are fetched as 0, except for table OID,
 * which is fetched as the local table OID. However, we must be
-* careful; the table could be beneath an outer join, in which case
-* it must go to NULL whenever the rest of the row does.
+* careful; the table could be beneath an outer join, in which case it
+* must go to NULL whenever the rest of the row does.
 */
 Oid fetchval = 0;
 if (varattno == TableOidAttributeNumber)
 {
@@ -1633,10 +1633,10 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
 0 - FirstLowInvalidHeapAttributeNumber);
 /*
-* In case the whole-row reference is under an outer join then it has to
-* go NULL whenver the rest of the row goes NULL. Deparsing a join query
-* would always involve multiple relations, thus qualify_col would be
-* true.
+* In case the whole-row reference is under an outer join then it has
+* to go NULL whenver the rest of the row goes NULL. Deparsing a join
+* query would always involve multiple relations, thus qualify_col
+* would be true.
 */
 if (qualify_col)
 {
@@ -1652,7 +1652,7 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
 /* Complete the CASE WHEN statement started above. */
 if (qualify_col)
-appendStringInfo(buf," END");
+appendStringInfo(buf, " END");
 heap_close(rel, NoLock);
 bms_free(attrs_used);


@@ -133,9 +133,9 @@ postgres_fdw_validator(PG_FUNCTION_ARGS)
 }
 else if (strcmp(def->defname, "fetch_size") == 0)
 {
 int fetch_size;
-fetch_size = strtol(defGetString(def), NULL,10);
+fetch_size = strtol(defGetString(def), NULL, 10);
 if (fetch_size <= 0)
 ereport(ERROR,
 (errcode(ERRCODE_SYNTAX_ERROR),


@@ -4063,19 +4063,20 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
 /*
 * Pull the other remote conditions from the joining relations into join
-* clauses or other remote clauses (remote_conds) of this relation wherever
-* possible. This avoids building subqueries at every join step, which is
-* not currently supported by the deparser logic.
+* clauses or other remote clauses (remote_conds) of this relation
+* wherever possible. This avoids building subqueries at every join step,
+* which is not currently supported by the deparser logic.
 *
 * For an inner join, clauses from both the relations are added to the
-* other remote clauses. For LEFT and RIGHT OUTER join, the clauses from the
-* outer side are added to remote_conds since those can be evaluated after
-* the join is evaluated. The clauses from inner side are added to the
-* joinclauses, since they need to evaluated while constructing the join.
+* other remote clauses. For LEFT and RIGHT OUTER join, the clauses from
+* the outer side are added to remote_conds since those can be evaluated
+* after the join is evaluated. The clauses from inner side are added to
+* the joinclauses, since they need to evaluated while constructing the
+* join.
 *
-* For a FULL OUTER JOIN, the other clauses from either relation can not be
-* added to the joinclauses or remote_conds, since each relation acts as an
-* outer relation for the other. Consider such full outer join as
+* For a FULL OUTER JOIN, the other clauses from either relation can not
+* be added to the joinclauses or remote_conds, since each relation acts
+* as an outer relation for the other. Consider such full outer join as
 * unshippable because of the reasons mentioned above in this comment.
 *
 * The joining sides can not have local conditions, thus no need to test


@@ -78,7 +78,7 @@ typedef struct PgFdwRelationInfo
 ForeignServer *server;
 UserMapping *user; /* only set in use_remote_estimate mode */
 int fetch_size; /* fetch size for this remote table */
 /*
 * Name of the relation while EXPLAINing ForeignScan. It is used for join
@@ -133,23 +133,23 @@ extern void deparseUpdateSql(StringInfo buf, PlannerInfo *root,
 List *targetAttrs, List *returningList,
 List **retrieved_attrs);
 extern void deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root,
 Index rtindex, Relation rel,
 List *targetlist,
 List *targetAttrs,
 List *remote_conds,
 List **params_list,
 List *returningList,
 List **retrieved_attrs);
 extern void deparseDeleteSql(StringInfo buf, PlannerInfo *root,
 Index rtindex, Relation rel,
 List *returningList,
 List **retrieved_attrs);
 extern void deparseDirectDeleteSql(StringInfo buf, PlannerInfo *root,
 Index rtindex, Relation rel,
 List *remote_conds,
 List **params_list,
 List *returningList,
 List **retrieved_attrs);
 extern void deparseAnalyzeSizeSql(StringInfo buf, Relation rel);
 extern void deparseAnalyzeSql(StringInfo buf, Relation rel,
 List **retrieved_attrs);


@@ -494,8 +494,8 @@ ssl_extension_info(PG_FUNCTION_ARGS)
 if (nid == NID_undef)
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 errmsg("unknown OpenSSL extension in certificate at position %d",
 call_cntr)));
 values[0] = CStringGetTextDatum(OBJ_nid2sn(nid));
 nulls[0] = false;


@@ -65,9 +65,9 @@ static void pg_decode_change(LogicalDecodingContext *ctx,
 static bool pg_decode_filter(LogicalDecodingContext *ctx,
 RepOriginId origin_id);
 static void pg_decode_message(LogicalDecodingContext *ctx,
 ReorderBufferTXN *txn, XLogRecPtr message_lsn,
 bool transactional, const char *prefix,
 Size sz, const char *message);
 void
 _PG_init(void)


@@ -47,7 +47,7 @@ brin_xlog_insert_update(XLogReaderState *record,
 {
 XLogRecPtr lsn = record->EndRecPtr;
 Buffer buffer;
 BlockNumber regpgno;
 Page page;
 XLogRedoAction action;


@@ -101,7 +101,8 @@ static relopt_int intRelOpts[] =
 "fillfactor",
 "Packs table pages only to this percentage",
 RELOPT_KIND_HEAP,
-ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ShareUpdateExclusiveLock /* since it applies only to later
+* inserts */
 },
 HEAP_DEFAULT_FILLFACTOR, HEAP_MIN_FILLFACTOR, 100
 },
@@ -110,7 +111,8 @@ static relopt_int intRelOpts[] =
 "fillfactor",
 "Packs btree index pages only to this percentage",
 RELOPT_KIND_BTREE,
-ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ShareUpdateExclusiveLock /* since it applies only to later
+* inserts */
 },
 BTREE_DEFAULT_FILLFACTOR, BTREE_MIN_FILLFACTOR, 100
 },
@@ -119,7 +121,8 @@ static relopt_int intRelOpts[] =
 "fillfactor",
 "Packs hash index pages only to this percentage",
 RELOPT_KIND_HASH,
-ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ShareUpdateExclusiveLock /* since it applies only to later
+* inserts */
 },
 HASH_DEFAULT_FILLFACTOR, HASH_MIN_FILLFACTOR, 100
 },
@@ -128,7 +131,8 @@ static relopt_int intRelOpts[] =
 "fillfactor",
 "Packs gist index pages only to this percentage",
 RELOPT_KIND_GIST,
-ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ShareUpdateExclusiveLock /* since it applies only to later
+* inserts */
 },
 GIST_DEFAULT_FILLFACTOR, GIST_MIN_FILLFACTOR, 100
 },
@@ -137,7 +141,8 @@ static relopt_int intRelOpts[] =
 "fillfactor",
 "Packs spgist index pages only to this percentage",
 RELOPT_KIND_SPGIST,
-ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ShareUpdateExclusiveLock /* since it applies only to later
+* inserts */
 },
 SPGIST_DEFAULT_FILLFACTOR, SPGIST_MIN_FILLFACTOR, 100
 },
@@ -1475,8 +1480,8 @@ tablespace_reloptions(Datum reloptions, bool validate)
 LOCKMODE
 AlterTableGetRelOptionsLockLevel(List *defList)
 {
 LOCKMODE lockmode = NoLock;
 ListCell *cell;
 if (defList == NIL)
 return AccessExclusiveLock;
@@ -1486,8 +1491,8 @@ AlterTableGetRelOptionsLockLevel(List *defList)
 foreach(cell, defList)
 {
 DefElem *def = (DefElem *) lfirst(cell);
 int i;
 for (i = 0; relOpts[i]; i++)
 {


@@ -524,7 +524,7 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead,
 int64 nDeletedHeapTuples = 0;
 ginxlogDeleteListPages data;
 Buffer buffers[GIN_NDELETE_AT_ONCE];
 BlockNumber freespace[GIN_NDELETE_AT_ONCE];
 data.ndeleted = 0;
 while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead)
@@ -745,30 +745,29 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
 bool inVacuum = (stats == NULL);
 /*
-* We would like to prevent concurrent cleanup process. For
-* that we will lock metapage in exclusive mode using LockPage()
-* call. Nobody other will use that lock for metapage, so
-* we keep possibility of concurrent insertion into pending list
+* We would like to prevent concurrent cleanup process. For that we will
+* lock metapage in exclusive mode using LockPage() call. Nobody other
+* will use that lock for metapage, so we keep possibility of concurrent
+* insertion into pending list
 */
 if (inVacuum)
 {
 /*
-* We are called from [auto]vacuum/analyze or
-* gin_clean_pending_list() and we would like to wait
-* concurrent cleanup to finish.
+* We are called from [auto]vacuum/analyze or gin_clean_pending_list()
+* and we would like to wait concurrent cleanup to finish.
 */
 LockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
 workMemory =
 (IsAutoVacuumWorkerProcess() && autovacuum_work_mem != -1) ?
 autovacuum_work_mem : maintenance_work_mem;
 }
 else
 {
 /*
-* We are called from regular insert and if we see
-* concurrent cleanup just exit in hope that concurrent
-* process will clean up pending list.
+* We are called from regular insert and if we see concurrent cleanup
+* just exit in hope that concurrent process will clean up pending
+* list.
 */
 if (!ConditionalLockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock))
 return;
@@ -829,9 +828,10 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
 Assert(!GinPageIsDeleted(page));
 /*
-* Are we walk through the page which as we remember was a tail when we
-* start our cleanup? But if caller asks us to clean up whole pending
-* list then ignore old tail, we will work until list becomes empty.
+* Are we walk through the page which as we remember was a tail when
+* we start our cleanup? But if caller asks us to clean up whole
+* pending list then ignore old tail, we will work until list becomes
+* empty.
 */
 if (blkno == blknoFinish && full_clean == false)
 cleanupFinish = true;
@@ -917,8 +917,8 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
 * locking */
 /*
-* remove read pages from pending list, at this point all
-* content of read pages is in regular structure
+* remove read pages from pending list, at this point all content
+* of read pages is in regular structure
 */
 shiftList(index, metabuffer, blkno, fill_fsm, stats);
@@ -961,9 +961,9 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
 ReleaseBuffer(metabuffer);
 /*
-* As pending list pages can have a high churn rate, it is
-* desirable to recycle them immediately to the FreeSpace Map when
-* ordinary backends clean the list.
+* As pending list pages can have a high churn rate, it is desirable to
+* recycle them immediately to the FreeSpace Map when ordinary backends
+* clean the list.
 */
 if (fsm_vac && fill_fsm)
 IndexFreeSpaceMapVacuum(index);
@@ -989,7 +989,7 @@ gin_clean_pending_list(PG_FUNCTION_ARGS)
 ereport(ERROR,
 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
 errmsg("recovery is in progress"),
 errhint("GIN pending list cannot be cleaned up during recovery.")));
 /* Must be a GIN index */
 if (indexRel->rd_rel->relkind != RELKIND_INDEX ||


@@ -281,7 +281,7 @@ ginBuildCallback(Relation index, HeapTuple htup, Datum *values,
 &htup->t_self);
 /* If we've maxed out our available memory, dump everything to the index */
-if (buildstate->accum.allocatedMemory >= (Size)maintenance_work_mem * 1024L)
+if (buildstate->accum.allocatedMemory >= (Size) maintenance_work_mem * 1024L)
 {
 ItemPointerData *list;
 Datum key;


@@ -540,8 +540,10 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 {
 /* Yes, so initialize stats to zeroes */
 stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
 /*
-* and cleanup any pending inserts */
+* and cleanup any pending inserts
+*/
 ginInsertCleanup(&gvs.ginstate, !IsAutoVacuumWorkerProcess(),
 false, stats);
 }


@@ -1498,8 +1498,9 @@ static void
 gistvacuumpage(Relation rel, Page page, Buffer buffer)
 {
 OffsetNumber deletable[MaxIndexTuplesPerPage];
 int ndeletable = 0;
-OffsetNumber offnum, maxoff;
+OffsetNumber offnum,
+maxoff;
 Assert(GistPageIsLeaf(page));


@@ -36,13 +36,13 @@
 static void
 gistkillitems(IndexScanDesc scan)
 {
 GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
 Buffer buffer;
 Page page;
 OffsetNumber offnum;
 ItemId iid;
 int i;
 bool killedsomething = false;
 Assert(so->curBlkno != InvalidBlockNumber);
 Assert(!XLogRecPtrIsInvalid(so->curPageLSN));
@@ -57,21 +57,22 @@ gistkillitems(IndexScanDesc scan)
 page = BufferGetPage(buffer);
 /*
-* If page LSN differs it means that the page was modified since the last read.
-* killedItems could be not valid so LP_DEAD hints applying is not safe.
+* If page LSN differs it means that the page was modified since the last
+* read. killedItems could be not valid so LP_DEAD hints applying is not
+* safe.
 */
-if(PageGetLSN(page) != so->curPageLSN)
+if (PageGetLSN(page) != so->curPageLSN)
 {
 UnlockReleaseBuffer(buffer);
 so->numKilled = 0; /* reset counter */
 return;
 }
 Assert(GistPageIsLeaf(page));
 /*
-* Mark all killedItems as dead. We need no additional recheck,
-* because, if page was modified, pageLSN must have changed.
+* Mark all killedItems as dead. We need no additional recheck, because,
+* if page was modified, pageLSN must have changed.
 */
 for (i = 0; i < so->numKilled; i++)
 {
@@ -390,7 +391,7 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
 maxoff = PageGetMaxOffsetNumber(page);
 for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
 {
 ItemId iid = PageGetItemId(page, i);
 IndexTuple it;
 bool match;
 bool recheck;
@@ -400,10 +401,11 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
 * If the scan specifies not to return killed tuples, then we treat a
 * killed tuple as not passing the qual.
 */
-if(scan->ignore_killed_tuples && ItemIdIsDead(iid))
+if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
 continue;
 it = (IndexTuple) PageGetItem(page, iid);
 /*
 * Must call gistindex_keytest in tempCxt, and clean up any leftover
 * junk afterward.
@@ -665,11 +667,11 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
 if (so->killedItems == NULL)
 {
 MemoryContext oldCxt =
 MemoryContextSwitchTo(so->giststate->scanCxt);
 so->killedItems =
 (OffsetNumber *) palloc(MaxIndexTuplesPerPage
 * sizeof(OffsetNumber));
 MemoryContextSwitchTo(oldCxt);
 }
@@ -702,11 +704,11 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
 if (so->killedItems == NULL)
 {
 MemoryContext oldCxt =
 MemoryContextSwitchTo(so->giststate->scanCxt);
 so->killedItems =
 (OffsetNumber *) palloc(MaxIndexTuplesPerPage
 * sizeof(OffsetNumber));
 MemoryContextSwitchTo(oldCxt);
 }


@@ -230,8 +230,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys,
 ScanKey skey = scan->keyData + i;
 /*
-* Copy consistent support function to ScanKey structure
-* instead of function implementing filtering operator.
+* Copy consistent support function to ScanKey structure instead
+* of function implementing filtering operator.
 */
 fmgr_info_copy(&(skey->sk_func),
 &(so->giststate->consistentFn[skey->sk_attno - 1]),
@@ -303,8 +303,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys,
 so->orderByTypes[i] = get_func_rettype(skey->sk_func.fn_oid);
 /*
-* Copy distance support function to ScanKey structure
-* instead of function implementing ordering operator.
+* Copy distance support function to ScanKey structure instead of
+* function implementing ordering operator.
 */
 fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);


@ -1687,7 +1687,7 @@ heap_parallelscan_nextpage(HeapScanDesc scan)
{ {
BlockNumber page = InvalidBlockNumber; BlockNumber page = InvalidBlockNumber;
BlockNumber sync_startpage = InvalidBlockNumber; BlockNumber sync_startpage = InvalidBlockNumber;
BlockNumber report_page = InvalidBlockNumber; BlockNumber report_page = InvalidBlockNumber;
ParallelHeapScanDesc parallel_scan; ParallelHeapScanDesc parallel_scan;
Assert(scan->rs_parallel); Assert(scan->rs_parallel);


@ -178,7 +178,7 @@ static void
RelationAddExtraBlocks(Relation relation, BulkInsertState bistate) RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
{ {
Page page; Page page;
BlockNumber blockNum = InvalidBlockNumber, BlockNumber blockNum = InvalidBlockNumber,
firstBlock = InvalidBlockNumber; firstBlock = InvalidBlockNumber;
int extraBlocks = 0; int extraBlocks = 0;
int lockWaiters = 0; int lockWaiters = 0;
@ -191,10 +191,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
return; return;
/* /*
* It might seem like multiplying the number of lock waiters by as much * It might seem like multiplying the number of lock waiters by as much as
* as 20 is too aggressive, but benchmarking revealed that smaller numbers * 20 is too aggressive, but benchmarking revealed that smaller numbers
* were insufficient. 512 is just an arbitrary cap to prevent pathological * were insufficient. 512 is just an arbitrary cap to prevent
* results. * pathological results.
*/ */
extraBlocks = Min(512, lockWaiters * 20); extraBlocks = Min(512, lockWaiters * 20);
@ -225,10 +225,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
} }
/* /*
* Updating the upper levels of the free space map is too expensive * Updating the upper levels of the free space map is too expensive to do
* to do for every block, but it's worth doing once at the end to make * for every block, but it's worth doing once at the end to make sure that
* sure that subsequent insertion activity sees all of those nifty free * subsequent insertion activity sees all of those nifty free pages we
* pages we just inserted. * just inserted.
* *
* Note that we're using the freespace value that was reported for the * Note that we're using the freespace value that was reported for the
* last block we added as if it were the freespace value for every block * last block we added as if it were the freespace value for every block
@ -547,8 +547,8 @@ loop:
} }
/* /*
* In addition to whatever extension we performed above, we always add * In addition to whatever extension we performed above, we always add at
* at least one block to satisfy our own request. * least one block to satisfy our own request.
* *
* XXX This does an lseek - rather expensive - but at the moment it is the * XXX This does an lseek - rather expensive - but at the moment it is the
* only way to accurately determine how many blocks are in a relation. Is * only way to accurately determine how many blocks are in a relation. Is


@ -105,8 +105,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
OldestXmin = RecentGlobalXmin; OldestXmin = RecentGlobalXmin;
else else
OldestXmin = OldestXmin =
TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin, TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin,
relation); relation);
Assert(TransactionIdIsValid(OldestXmin)); Assert(TransactionIdIsValid(OldestXmin));


@ -272,7 +272,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk); uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk); uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
Page page; Page page;
uint8 *map; uint8 *map;
#ifdef TRACE_VISIBILITYMAP #ifdef TRACE_VISIBILITYMAP
elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk); elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
@ -291,7 +291,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
elog(ERROR, "wrong VM buffer passed to visibilitymap_set"); elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
page = BufferGetPage(vmBuf); page = BufferGetPage(vmBuf);
map = (uint8 *)PageGetContents(page); map = (uint8 *) PageGetContents(page);
LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE); LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);
if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS)) if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS))


@ -395,7 +395,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* Check for a conflict-in as we would if we were going to * Check for a conflict-in as we would if we were going to
* write to this page. We aren't actually going to write, * write to this page. We aren't actually going to write,
* but we want a chance to report SSI conflicts that would * but we want a chance to report SSI conflicts that would
* otherwise be masked by this unique constraint violation. * otherwise be masked by this unique constraint
* violation.
*/ */
CheckForSerializableConflictIn(rel, NULL, buf); CheckForSerializableConflictIn(rel, NULL, buf);


@ -813,8 +813,8 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
/* /*
* Check to see if we need to issue one final WAL record for this index, * Check to see if we need to issue one final WAL record for this index,
* which may be needed for correctness on a hot standby node when * which may be needed for correctness on a hot standby node when non-MVCC
* non-MVCC index scans could take place. * index scans could take place.
* *
* If the WAL is replayed in hot standby, the replay process needs to get * If the WAL is replayed in hot standby, the replay process needs to get
* cleanup locks on all index leaf pages, just as we've been doing here. * cleanup locks on all index leaf pages, just as we've been doing here.
@ -1025,13 +1025,13 @@ restart:
if (ndeletable > 0) if (ndeletable > 0)
{ {
/* /*
* Notice that the issued XLOG_BTREE_VACUUM WAL record includes all * Notice that the issued XLOG_BTREE_VACUUM WAL record includes
* information to the replay code to allow it to get a cleanup lock * all information to the replay code to allow it to get a cleanup
* on all pages between the previous lastBlockVacuumed and this page. * lock on all pages between the previous lastBlockVacuumed and
* This ensures that WAL replay locks all leaf pages at some point, * this page. This ensures that WAL replay locks all leaf pages at
* which is important should non-MVCC scans be requested. * some point, which is important should non-MVCC scans be
* This is currently unused on standby, but we record it anyway, so * requested. This is currently unused on standby, but we record
* that the WAL contains the required information. * it anyway, so that the WAL contains the required information.
* *
* Since we can visit leaf pages out-of-order when recursing, * Since we can visit leaf pages out-of-order when recursing,
* replay might end up locking such pages an extra time, but it * replay might end up locking such pages an extra time, but it


@ -392,15 +392,15 @@ btree_xlog_vacuum(XLogReaderState *record)
xl_btree_vacuum *xlrec = (xl_btree_vacuum *) XLogRecGetData(record); xl_btree_vacuum *xlrec = (xl_btree_vacuum *) XLogRecGetData(record);
/* /*
* This section of code is thought to be no longer needed, after * This section of code is thought to be no longer needed, after analysis
* analysis of the calling paths. It is retained to allow the code * of the calling paths. It is retained to allow the code to be reinstated
* to be reinstated if a flaw is revealed in that thinking. * if a flaw is revealed in that thinking.
* *
* If we are running non-MVCC scans using this index we need to do some * If we are running non-MVCC scans using this index we need to do some
* additional work to ensure correctness, which is known as a "pin scan" * additional work to ensure correctness, which is known as a "pin scan"
* described in more detail in next paragraphs. We used to do the extra * described in more detail in next paragraphs. We used to do the extra
* work in all cases, whereas we now avoid that work in most cases. * work in all cases, whereas we now avoid that work in most cases. If
* If lastBlockVacuumed is set to InvalidBlockNumber then we skip the * lastBlockVacuumed is set to InvalidBlockNumber then we skip the
* additional work required for the pin scan. * additional work required for the pin scan.
* *
* Avoiding this extra work is important since it requires us to touch * Avoiding this extra work is important since it requires us to touch


@ -29,8 +29,8 @@ generic_desc(StringInfo buf, XLogReaderState *record)
while (ptr < end) while (ptr < end)
{ {
OffsetNumber offset, OffsetNumber offset,
length; length;
memcpy(&offset, ptr, sizeof(offset)); memcpy(&offset, ptr, sizeof(offset));
ptr += sizeof(offset); ptr += sizeof(offset);


@ -26,7 +26,7 @@ logicalmsg_desc(StringInfo buf, XLogReaderState *record)
xl_logical_message *xlrec = (xl_logical_message *) rec; xl_logical_message *xlrec = (xl_logical_message *) rec;
appendStringInfo(buf, "%s message size %zu bytes", appendStringInfo(buf, "%s message size %zu bytes",
xlrec->transactional ? "transactional" : "nontransactional", xlrec->transactional ? "transactional" : "nontransactional",
xlrec->message_size); xlrec->message_size);
} }
} }


@ -100,7 +100,7 @@ standby_desc_invalidations(StringInfo buf,
Oid dbId, Oid tsId, Oid dbId, Oid tsId,
bool relcacheInitFileInval) bool relcacheInitFileInval)
{ {
int i; int i;
if (relcacheInitFileInval) if (relcacheInitFileInval)
appendStringInfo(buf, "; relcache init file inval dbid %u tsid %u", appendStringInfo(buf, "; relcache init file inval dbid %u tsid %u",


@ -205,8 +205,8 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId
if (parsed.nmsgs > 0) if (parsed.nmsgs > 0)
{ {
standby_desc_invalidations( standby_desc_invalidations(
buf, parsed.nmsgs, parsed.msgs, parsed.dbId, parsed.tsId, buf, parsed.nmsgs, parsed.msgs, parsed.dbId, parsed.tsId,
XactCompletionRelcacheInitFileInval(parsed.xinfo)); XactCompletionRelcacheInitFileInval(parsed.xinfo));
} }
if (XactCompletionForceSyncCommit(parsed.xinfo)) if (XactCompletionForceSyncCommit(parsed.xinfo))


@ -26,8 +26,8 @@
const struct config_enum_entry wal_level_options[] = { const struct config_enum_entry wal_level_options[] = {
{"minimal", WAL_LEVEL_MINIMAL, false}, {"minimal", WAL_LEVEL_MINIMAL, false},
{"replica", WAL_LEVEL_REPLICA, false}, {"replica", WAL_LEVEL_REPLICA, false},
{"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */ {"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */
{"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */ {"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */
{"logical", WAL_LEVEL_LOGICAL, false}, {"logical", WAL_LEVEL_LOGICAL, false},
{NULL, 0, false} {NULL, 0, false}
}; };


@ -92,7 +92,7 @@ typedef struct CommitTimestampShared
{ {
TransactionId xidLastCommit; TransactionId xidLastCommit;
CommitTimestampEntry dataLastCommit; CommitTimestampEntry dataLastCommit;
bool commitTsActive; bool commitTsActive;
} CommitTimestampShared; } CommitTimestampShared;
CommitTimestampShared *commitTsShared; CommitTimestampShared *commitTsShared;
@ -153,9 +153,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
* No-op if the module is not active. * No-op if the module is not active.
* *
* An unlocked read here is fine, because in a standby (the only place * An unlocked read here is fine, because in a standby (the only place
* where the flag can change in flight) this routine is only called by * where the flag can change in flight) this routine is only called by the
* the recovery process, which is also the only process which can change * recovery process, which is also the only process which can change the
* the flag. * flag.
*/ */
if (!commitTsShared->commitTsActive) if (!commitTsShared->commitTsActive)
return; return;
@ -767,8 +767,8 @@ ExtendCommitTs(TransactionId newestXact)
int pageno; int pageno;
/* /*
* Nothing to do if module not enabled. Note we do an unlocked read of the * Nothing to do if module not enabled. Note we do an unlocked read of
* flag here, which is okay because this routine is only called from * the flag here, which is okay because this routine is only called from
* GetNewTransactionId, which is never called in a standby. * GetNewTransactionId, which is never called in a standby.
*/ */
Assert(!InRecovery); Assert(!InRecovery);
@ -855,7 +855,7 @@ AdvanceOldestCommitTsXid(TransactionId oldestXact)
{ {
LWLockAcquire(CommitTsLock, LW_EXCLUSIVE); LWLockAcquire(CommitTsLock, LW_EXCLUSIVE);
if (ShmemVariableCache->oldestCommitTsXid != InvalidTransactionId && if (ShmemVariableCache->oldestCommitTsXid != InvalidTransactionId &&
TransactionIdPrecedes(ShmemVariableCache->oldestCommitTsXid, oldestXact)) TransactionIdPrecedes(ShmemVariableCache->oldestCommitTsXid, oldestXact))
ShmemVariableCache->oldestCommitTsXid = oldestXact; ShmemVariableCache->oldestCommitTsXid = oldestXact;
LWLockRelease(CommitTsLock); LWLockRelease(CommitTsLock);
} }


@ -52,9 +52,8 @@ typedef struct
Buffer buffer; /* registered buffer */ Buffer buffer; /* registered buffer */
int flags; /* flags for this buffer */ int flags; /* flags for this buffer */
int deltaLen; /* space consumed in delta field */ int deltaLen; /* space consumed in delta field */
char *image; /* copy of page image for modification, char *image; /* copy of page image for modification, do not
* do not do it in-place to have aligned * do it in-place to have aligned memory chunk */
* memory chunk */
char delta[MAX_DELTA_SIZE]; /* delta between page images */ char delta[MAX_DELTA_SIZE]; /* delta between page images */
} PageData; } PageData;


@ -988,8 +988,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
char *oldest_datname = get_database_name(oldest_datoid); char *oldest_datname = get_database_name(oldest_datoid);
/* /*
* Immediately kick autovacuum into action as we're already * Immediately kick autovacuum into action as we're already in
* in ERROR territory. * ERROR territory.
*/ */
SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER); SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER);
@ -1134,8 +1134,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg_plural("database with OID %u must be vacuumed before %d more multixact member is used", errmsg_plural("database with OID %u must be vacuumed before %d more multixact member is used",
"database with OID %u must be vacuumed before %d more multixact members are used", "database with OID %u must be vacuumed before %d more multixact members are used",
MultiXactState->offsetStopLimit - nextOffset + nmembers, MultiXactState->offsetStopLimit - nextOffset + nmembers,
MultiXactState->oldestMultiXactDB, MultiXactState->oldestMultiXactDB,
MultiXactState->offsetStopLimit - nextOffset + nmembers), MultiXactState->offsetStopLimit - nextOffset + nmembers),
errhint("Execute a database-wide VACUUM in that database with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings."))); errhint("Execute a database-wide VACUUM in that database with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings.")));


@ -134,9 +134,9 @@ CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
nworkers = 0; nworkers = 0;
/* /*
* If we are running under serializable isolation, we can't use * If we are running under serializable isolation, we can't use parallel
* parallel workers, at least not until somebody enhances that mechanism * workers, at least not until somebody enhances that mechanism to be
* to be parallel-aware. * parallel-aware.
*/ */
if (IsolationIsSerializable()) if (IsolationIsSerializable())
nworkers = 0; nworkers = 0;
@ -646,9 +646,9 @@ DestroyParallelContext(ParallelContext *pcxt)
} }
/* /*
* We can't finish transaction commit or abort until all of the * We can't finish transaction commit or abort until all of the workers
* workers have exited. This means, in particular, that we can't respond * have exited. This means, in particular, that we can't respond to
* to interrupts at this stage. * interrupts at this stage.
*/ */
HOLD_INTERRUPTS(); HOLD_INTERRUPTS();
WaitForParallelWorkersToExit(pcxt); WaitForParallelWorkersToExit(pcxt);
@ -918,7 +918,7 @@ ParallelWorkerMain(Datum main_arg)
if (toc == NULL) if (toc == NULL)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("invalid magic number in dynamic shared memory segment"))); errmsg("invalid magic number in dynamic shared memory segment")));
/* Look up fixed parallel state. */ /* Look up fixed parallel state. */
fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED); fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
@ -958,9 +958,9 @@ ParallelWorkerMain(Datum main_arg)
*/ */
/* /*
* Join locking group. We must do this before anything that could try * Join locking group. We must do this before anything that could try to
* to acquire a heavyweight lock, because any heavyweight locks acquired * acquire a heavyweight lock, because any heavyweight locks acquired to
* to this point could block either directly against the parallel group * this point could block either directly against the parallel group
* leader or against some process which in turn waits for a lock that * leader or against some process which in turn waits for a lock that
* conflicts with the parallel group leader, causing an undetected * conflicts with the parallel group leader, causing an undetected
* deadlock. (If we can't join the lock group, the leader has gone away, * deadlock. (If we can't join the lock group, the leader has gone away,


@ -152,7 +152,7 @@ SimpleLruShmemSize(int nslots, int nlsns)
sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */ sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */ sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */ sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */ sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
if (nlsns > 0) if (nlsns > 0)
sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */ sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
@ -224,7 +224,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
for (slotno = 0; slotno < nslots; slotno++) for (slotno = 0; slotno < nslots; slotno++)
{ {
LWLockInitialize(&shared->buffer_locks[slotno].lock, LWLockInitialize(&shared->buffer_locks[slotno].lock,
shared->lwlock_tranche_id); shared->lwlock_tranche_id);
shared->page_buffer[slotno] = ptr; shared->page_buffer[slotno] = ptr;
shared->page_status[slotno] = SLRU_PAGE_EMPTY; shared->page_status[slotno] = SLRU_PAGE_EMPTY;


@ -257,7 +257,7 @@ StartupSUBTRANS(TransactionId oldestActiveXID)
startPage++; startPage++;
/* must account for wraparound */ /* must account for wraparound */
if (startPage > TransactionIdToPage(MaxTransactionId)) if (startPage > TransactionIdToPage(MaxTransactionId))
startPage=0; startPage = 0;
} }
(void) ZeroSUBTRANSPage(startPage); (void) ZeroSUBTRANSPage(startPage);


@ -140,13 +140,13 @@ typedef struct GlobalTransactionData
TimestampTz prepared_at; /* time of preparation */ TimestampTz prepared_at; /* time of preparation */
/* /*
* Note that we need to keep track of two LSNs for each GXACT. * Note that we need to keep track of two LSNs for each GXACT. We keep
* We keep track of the start LSN because this is the address we must * track of the start LSN because this is the address we must use to read
* use to read state data back from WAL when committing a prepared GXACT. * state data back from WAL when committing a prepared GXACT. We keep
* We keep track of the end LSN because that is the LSN we need to wait * track of the end LSN because that is the LSN we need to wait for prior
* for prior to commit. * to commit.
*/ */
XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */ XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */
XLogRecPtr prepare_end_lsn; /* XLOG offset of prepare record end */ XLogRecPtr prepare_end_lsn; /* XLOG offset of prepare record end */
Oid owner; /* ID of user that executed the xact */ Oid owner; /* ID of user that executed the xact */
@ -980,7 +980,7 @@ StartPrepare(GlobalTransaction gxact)
hdr.nabortrels = smgrGetPendingDeletes(false, &abortrels); hdr.nabortrels = smgrGetPendingDeletes(false, &abortrels);
hdr.ninvalmsgs = xactGetCommittedInvalidationMessages(&invalmsgs, hdr.ninvalmsgs = xactGetCommittedInvalidationMessages(&invalmsgs,
&hdr.initfileinval); &hdr.initfileinval);
hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */ hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */
save_state_data(&hdr, sizeof(TwoPhaseFileHeader)); save_state_data(&hdr, sizeof(TwoPhaseFileHeader));
save_state_data(gxact->gid, hdr.gidlen); save_state_data(gxact->gid, hdr.gidlen);
@ -1259,28 +1259,28 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY), (errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"), errmsg("out of memory"),
errdetail("Failed while allocating an XLog reading processor."))); errdetail("Failed while allocating an XLog reading processor.")));
record = XLogReadRecord(xlogreader, lsn, &errormsg); record = XLogReadRecord(xlogreader, lsn, &errormsg);
if (record == NULL) if (record == NULL)
ereport(ERROR, ereport(ERROR,
(errcode_for_file_access(), (errcode_for_file_access(),
errmsg("could not read two-phase state from xlog at %X/%X", errmsg("could not read two-phase state from xlog at %X/%X",
(uint32) (lsn >> 32), (uint32) (lsn >> 32),
(uint32) lsn))); (uint32) lsn)));
if (XLogRecGetRmid(xlogreader) != RM_XACT_ID || if (XLogRecGetRmid(xlogreader) != RM_XACT_ID ||
(XLogRecGetInfo(xlogreader) & XLOG_XACT_OPMASK) != XLOG_XACT_PREPARE) (XLogRecGetInfo(xlogreader) & XLOG_XACT_OPMASK) != XLOG_XACT_PREPARE)
ereport(ERROR, ereport(ERROR,
(errcode_for_file_access(), (errcode_for_file_access(),
errmsg("expected two-phase state data is not present in xlog at %X/%X", errmsg("expected two-phase state data is not present in xlog at %X/%X",
(uint32) (lsn >> 32), (uint32) (lsn >> 32),
(uint32) lsn))); (uint32) lsn)));
if (len != NULL) if (len != NULL)
*len = XLogRecGetDataLen(xlogreader); *len = XLogRecGetDataLen(xlogreader);
*buf = palloc(sizeof(char)*XLogRecGetDataLen(xlogreader)); *buf = palloc(sizeof(char) * XLogRecGetDataLen(xlogreader));
memcpy(*buf, XLogRecGetData(xlogreader), sizeof(char) * XLogRecGetDataLen(xlogreader)); memcpy(*buf, XLogRecGetData(xlogreader), sizeof(char) * XLogRecGetDataLen(xlogreader));
XLogReaderFree(xlogreader); XLogReaderFree(xlogreader);
@ -1347,10 +1347,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
xid = pgxact->xid; xid = pgxact->xid;
/* /*
* Read and validate 2PC state data. * Read and validate 2PC state data. State data will typically be stored
* State data will typically be stored in WAL files if the LSN is after the * in WAL files if the LSN is after the last checkpoint record, or moved
* last checkpoint record, or moved to disk if for some reason they have * to disk if for some reason they have lived for a long time.
* lived for a long time.
*/ */
if (gxact->ondisk) if (gxact->ondisk)
buf = ReadTwoPhaseFile(xid, true); buf = ReadTwoPhaseFile(xid, true);
@ -1605,22 +1604,20 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
TRACE_POSTGRESQL_TWOPHASE_CHECKPOINT_START(); TRACE_POSTGRESQL_TWOPHASE_CHECKPOINT_START();
/* /*
* We are expecting there to be zero GXACTs that need to be * We are expecting there to be zero GXACTs that need to be copied to
* copied to disk, so we perform all I/O while holding * disk, so we perform all I/O while holding TwoPhaseStateLock for
* TwoPhaseStateLock for simplicity. This prevents any new xacts * simplicity. This prevents any new xacts from preparing while this
* from preparing while this occurs, which shouldn't be a problem * occurs, which shouldn't be a problem since the presence of long-lived
* since the presence of long-lived prepared xacts indicates the * prepared xacts indicates the transaction manager isn't active.
* transaction manager isn't active.
* *
* It's also possible to move I/O out of the lock, but on * It's also possible to move I/O out of the lock, but on every error we
* every error we should check whether somebody committed our * should check whether somebody committed our transaction in different
* transaction in different backend. Let's leave this optimisation * backend. Let's leave this optimisation for future, if somebody will
* for future, if somebody will spot that this place cause * spot that this place cause bottleneck.
* bottleneck.
* *
* Note that it isn't possible for there to be a GXACT with * Note that it isn't possible for there to be a GXACT with a
* a prepare_end_lsn set prior to the last checkpoint yet * prepare_end_lsn set prior to the last checkpoint yet is marked invalid,
* is marked invalid, because of the efforts with delayChkpt. * because of the efforts with delayChkpt.
*/ */
LWLockAcquire(TwoPhaseStateLock, LW_SHARED); LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
for (i = 0; i < TwoPhaseState->numPrepXacts; i++) for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
@ -1633,7 +1630,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
gxact->prepare_end_lsn <= redo_horizon) gxact->prepare_end_lsn <= redo_horizon)
{ {
char *buf; char *buf;
int len; int len;
XlogReadTwoPhaseData(gxact->prepare_start_lsn, &buf, &len); XlogReadTwoPhaseData(gxact->prepare_start_lsn, &buf, &len);
RecreateTwoPhaseFile(pgxact->xid, buf, len); RecreateTwoPhaseFile(pgxact->xid, buf, len);
@ -1920,7 +1917,7 @@ RecoverPreparedTransactions(void)
TwoPhaseFileHeader *hdr; TwoPhaseFileHeader *hdr;
TransactionId *subxids; TransactionId *subxids;
GlobalTransaction gxact; GlobalTransaction gxact;
const char *gid; const char *gid;
int i; int i;
xid = (TransactionId) strtoul(clde->d_name, NULL, 16); xid = (TransactionId) strtoul(clde->d_name, NULL, 16);


@ -1166,19 +1166,19 @@ RecordTransactionCommit(void)
/* /*
* Transactions without an assigned xid can contain invalidation * Transactions without an assigned xid can contain invalidation
* messages (e.g. explicit relcache invalidations or catcache * messages (e.g. explicit relcache invalidations or catcache
* invalidations for inplace updates); standbys need to process * invalidations for inplace updates); standbys need to process those.
* those. We can't emit a commit record without an xid, and we don't * We can't emit a commit record without an xid, and we don't want to
* want to force assigning an xid, because that'd be problematic for * force assigning an xid, because that'd be problematic for e.g.
* e.g. vacuum. Hence we emit a bespoke record for the * vacuum. Hence we emit a bespoke record for the invalidations. We
* invalidations. We don't want to use that in case a commit record is * don't want to use that in case a commit record is emitted, so they
* emitted, so they happen synchronously with commits (besides not * happen synchronously with commits (besides not wanting to emit more
* wanting to emit more WAL records). * WAL records).
*/ */
if (nmsgs != 0) if (nmsgs != 0)
{ {
LogStandbyInvalidations(nmsgs, invalMessages, LogStandbyInvalidations(nmsgs, invalMessages,
RelcacheInitFileInval); RelcacheInitFileInval);
wrote_xlog = true; /* not strictly necessary */ wrote_xlog = true; /* not strictly necessary */
} }
/* /*
@ -1272,8 +1272,8 @@ RecordTransactionCommit(void)
* this case, but we don't currently try to do that. It would certainly * this case, but we don't currently try to do that. It would certainly
* cause problems at least in Hot Standby mode, where the * cause problems at least in Hot Standby mode, where the
* KnownAssignedXids machinery requires tracking every XID assignment. It * KnownAssignedXids machinery requires tracking every XID assignment. It
* might be OK to skip it only when wal_level < replica, but for now * might be OK to skip it only when wal_level < replica, but for now we
* we don't.) * don't.)
* *
* However, if we're doing cleanup of any non-temp rels or committing any * However, if we're doing cleanup of any non-temp rels or committing any
* command that wanted to force sync commit, then we must flush XLOG * command that wanted to force sync commit, then we must flush XLOG
@ -5486,8 +5486,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
/* /*
* If asked by the primary (because someone is waiting for a synchronous * If asked by the primary (because someone is waiting for a synchronous
* commit = remote_apply), we will need to ask walreceiver to send a * commit = remote_apply), we will need to ask walreceiver to send a reply
* reply immediately. * immediately.
*/ */
if (XactCompletionApplyFeedback(parsed->xinfo)) if (XactCompletionApplyFeedback(parsed->xinfo))
XLogRequestWalReceiverReply(); XLogRequestWalReceiverReply();


@ -5004,9 +5004,9 @@ readRecoveryCommandFile(void)
else else
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE), (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid value for recovery parameter \"%s\": \"%s\"", errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
"recovery_target_action", "recovery_target_action",
item->value), item->value),
errhint("Valid values are \"pause\", \"promote\", and \"shutdown\"."))); errhint("Valid values are \"pause\", \"promote\", and \"shutdown\".")));
ereport(DEBUG2, ereport(DEBUG2,
@ -5087,9 +5087,9 @@ readRecoveryCommandFile(void)
else else
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE), (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid value for recovery parameter \"%s\": \"%s\"", errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
"recovery_target", "recovery_target",
item->value), item->value),
errhint("The only allowed value is \"immediate\"."))); errhint("The only allowed value is \"immediate\".")));
ereport(DEBUG2, ereport(DEBUG2,
(errmsg_internal("recovery_target = '%s'", (errmsg_internal("recovery_target = '%s'",
@ -5880,8 +5880,8 @@ CheckRequiredParameterValues(void)
} }
/* /*
* For Hot Standby, the WAL must be generated with 'replica' mode, and * For Hot Standby, the WAL must be generated with 'replica' mode, and we
* we must have at least as many backend slots as the primary. * must have at least as many backend slots as the primary.
*/ */
if (ArchiveRecoveryRequested && EnableHotStandby) if (ArchiveRecoveryRequested && EnableHotStandby)
{ {
@ -6163,26 +6163,26 @@ StartupXLOG(void)
* is no use of such file. There is no harm in retaining it, but it * is no use of such file. There is no harm in retaining it, but it
* is better to get rid of the map file so that we don't have any * is better to get rid of the map file so that we don't have any
* redundant file in data directory and it will avoid any sort of * redundant file in data directory and it will avoid any sort of
* confusion. It seems prudent though to just rename the file out * confusion. It seems prudent though to just rename the file out of
* of the way rather than delete it completely, also we ignore any * the way rather than delete it completely, also we ignore any error
* error that occurs in rename operation as even if map file is * that occurs in rename operation as even if map file is present
* present without backup_label file, it is harmless. * without backup_label file, it is harmless.
*/ */
if (stat(TABLESPACE_MAP, &st) == 0) if (stat(TABLESPACE_MAP, &st) == 0)
{ {
unlink(TABLESPACE_MAP_OLD); unlink(TABLESPACE_MAP_OLD);
if (durable_rename(TABLESPACE_MAP, TABLESPACE_MAP_OLD, DEBUG1) == 0) if (durable_rename(TABLESPACE_MAP, TABLESPACE_MAP_OLD, DEBUG1) == 0)
ereport(LOG, ereport(LOG,
(errmsg("ignoring file \"%s\" because no file \"%s\" exists", (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
TABLESPACE_MAP, BACKUP_LABEL_FILE), TABLESPACE_MAP, BACKUP_LABEL_FILE),
errdetail("File \"%s\" was renamed to \"%s\".", errdetail("File \"%s\" was renamed to \"%s\".",
TABLESPACE_MAP, TABLESPACE_MAP_OLD))); TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
else else
ereport(LOG, ereport(LOG,
(errmsg("ignoring file \"%s\" because no file \"%s\" exists", (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
TABLESPACE_MAP, BACKUP_LABEL_FILE), TABLESPACE_MAP, BACKUP_LABEL_FILE),
errdetail("Could not rename file \"%s\" to \"%s\": %m.", errdetail("Could not rename file \"%s\" to \"%s\": %m.",
TABLESPACE_MAP, TABLESPACE_MAP_OLD))); TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
} }
/* /*
@ -6314,24 +6314,24 @@ StartupXLOG(void)
ereport(DEBUG1, ereport(DEBUG1,
(errmsg_internal("redo record is at %X/%X; shutdown %s", (errmsg_internal("redo record is at %X/%X; shutdown %s",
(uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo, (uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
wasShutdown ? "TRUE" : "FALSE"))); wasShutdown ? "TRUE" : "FALSE")));
ereport(DEBUG1, ereport(DEBUG1,
(errmsg_internal("next transaction ID: %u:%u; next OID: %u", (errmsg_internal("next transaction ID: %u:%u; next OID: %u",
checkPoint.nextXidEpoch, checkPoint.nextXid, checkPoint.nextXidEpoch, checkPoint.nextXid,
checkPoint.nextOid))); checkPoint.nextOid)));
ereport(DEBUG1, ereport(DEBUG1,
(errmsg_internal("next MultiXactId: %u; next MultiXactOffset: %u", (errmsg_internal("next MultiXactId: %u; next MultiXactOffset: %u",
checkPoint.nextMulti, checkPoint.nextMultiOffset))); checkPoint.nextMulti, checkPoint.nextMultiOffset)));
ereport(DEBUG1, ereport(DEBUG1,
(errmsg_internal("oldest unfrozen transaction ID: %u, in database %u", (errmsg_internal("oldest unfrozen transaction ID: %u, in database %u",
checkPoint.oldestXid, checkPoint.oldestXidDB))); checkPoint.oldestXid, checkPoint.oldestXidDB)));
ereport(DEBUG1, ereport(DEBUG1,
(errmsg_internal("oldest MultiXactId: %u, in database %u", (errmsg_internal("oldest MultiXactId: %u, in database %u",
checkPoint.oldestMulti, checkPoint.oldestMultiDB))); checkPoint.oldestMulti, checkPoint.oldestMultiDB)));
ereport(DEBUG1, ereport(DEBUG1,
(errmsg_internal("commit timestamp Xid oldest/newest: %u/%u", (errmsg_internal("commit timestamp Xid oldest/newest: %u/%u",
checkPoint.oldestCommitTsXid, checkPoint.oldestCommitTsXid,
checkPoint.newestCommitTsXid))); checkPoint.newestCommitTsXid)));
if (!TransactionIdIsNormal(checkPoint.nextXid)) if (!TransactionIdIsNormal(checkPoint.nextXid))
ereport(PANIC, ereport(PANIC,
(errmsg("invalid next transaction ID"))); (errmsg("invalid next transaction ID")));
@ -6883,8 +6883,8 @@ StartupXLOG(void)
SpinLockRelease(&XLogCtl->info_lck); SpinLockRelease(&XLogCtl->info_lck);
/* /*
* If rm_redo called XLogRequestWalReceiverReply, then we * If rm_redo called XLogRequestWalReceiverReply, then we wake
* wake up the receiver so that it notices the updated * up the receiver so that it notices the updated
* lastReplayedEndRecPtr and sends a reply to the master. * lastReplayedEndRecPtr and sends a reply to the master.
*/ */
if (doRequestWalReceiverReply) if (doRequestWalReceiverReply)


@ -104,8 +104,8 @@ pg_start_backup(PG_FUNCTION_ARGS)
MemoryContext oldcontext; MemoryContext oldcontext;
/* /*
* Label file and tablespace map file need to be long-lived, since they * Label file and tablespace map file need to be long-lived, since
* are read in pg_stop_backup. * they are read in pg_stop_backup.
*/ */
oldcontext = MemoryContextSwitchTo(TopMemoryContext); oldcontext = MemoryContextSwitchTo(TopMemoryContext);
label_file = makeStringInfo(); label_file = makeStringInfo();
@ -113,7 +113,7 @@ pg_start_backup(PG_FUNCTION_ARGS)
MemoryContextSwitchTo(oldcontext); MemoryContextSwitchTo(oldcontext);
startpoint = do_pg_start_backup(backupidstr, fast, NULL, label_file, startpoint = do_pg_start_backup(backupidstr, fast, NULL, label_file,
dir, NULL, tblspc_map_file, false, true); dir, NULL, tblspc_map_file, false, true);
nonexclusive_backup_running = true; nonexclusive_backup_running = true;
before_shmem_exit(nonexclusive_base_backup_cleanup, (Datum) 0); before_shmem_exit(nonexclusive_base_backup_cleanup, (Datum) 0);
@ -138,8 +138,8 @@ pg_start_backup(PG_FUNCTION_ARGS)
* Note: different from CancelBackup which just cancels online backup mode. * Note: different from CancelBackup which just cancels online backup mode.
* *
* Note: this version is only called to stop an exclusive backup. The function * Note: this version is only called to stop an exclusive backup. The function
* pg_stop_backup_v2 (overloaded as pg_stop_backup in SQL) is called to * pg_stop_backup_v2 (overloaded as pg_stop_backup in SQL) is called to
* stop non-exclusive backups. * stop non-exclusive backups.
* *
* Permission checking for this function is managed through the normal * Permission checking for this function is managed through the normal
* GRANT system. * GRANT system.
@ -156,10 +156,10 @@ pg_stop_backup(PG_FUNCTION_ARGS)
errhint("Did you mean to use pg_stop_backup('f')?"))); errhint("Did you mean to use pg_stop_backup('f')?")));
/* /*
* Exclusive backups were typically started in a different connection, * Exclusive backups were typically started in a different connection, so
* so don't try to verify that exclusive_backup_running is set in this one. * don't try to verify that exclusive_backup_running is set in this one.
* Actual verification that an exclusive backup is in fact running is handled * Actual verification that an exclusive backup is in fact running is
* inside do_pg_stop_backup. * handled inside do_pg_stop_backup.
*/ */
stoppoint = do_pg_stop_backup(NULL, true, NULL); stoppoint = do_pg_stop_backup(NULL, true, NULL);
@ -182,16 +182,16 @@ pg_stop_backup(PG_FUNCTION_ARGS)
Datum Datum
pg_stop_backup_v2(PG_FUNCTION_ARGS) pg_stop_backup_v2(PG_FUNCTION_ARGS)
{ {
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
TupleDesc tupdesc; TupleDesc tupdesc;
Tuplestorestate *tupstore; Tuplestorestate *tupstore;
MemoryContext per_query_ctx; MemoryContext per_query_ctx;
MemoryContext oldcontext; MemoryContext oldcontext;
Datum values[3]; Datum values[3];
bool nulls[3]; bool nulls[3];
bool exclusive = PG_GETARG_BOOL(0); bool exclusive = PG_GETARG_BOOL(0);
XLogRecPtr stoppoint; XLogRecPtr stoppoint;
/* check to see if caller supports us returning a tuplestore */ /* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
@ -248,9 +248,8 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS)
errhint("Did you mean to use pg_stop_backup('t')?"))); errhint("Did you mean to use pg_stop_backup('t')?")));
/* /*
* Stop the non-exclusive backup. Return a copy of the backup * Stop the non-exclusive backup. Return a copy of the backup label
* label and tablespace map so they can be written to disk by * and tablespace map so they can be written to disk by the caller.
* the caller.
*/ */
stoppoint = do_pg_stop_backup(label_file->data, true, NULL); stoppoint = do_pg_stop_backup(label_file->data, true, NULL);
nonexclusive_backup_running = false; nonexclusive_backup_running = false;
@ -269,7 +268,7 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS)
} }
/* Stoppoint is included on both exclusive and nonexclusive backups */ /* Stoppoint is included on both exclusive and nonexclusive backups */
values[0] = LSNGetDatum(stoppoint); values[0] = LSNGetDatum(stoppoint);
tuplestore_putvalues(tupstore, tupdesc, values, nulls); tuplestore_putvalues(tupstore, tupdesc, values, nulls);
tuplestore_donestoring(typstore); tuplestore_donestoring(typstore);


@ -322,7 +322,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg)
if (total_len < SizeOfXLogRecord) if (total_len < SizeOfXLogRecord)
{ {
report_invalid_record(state, report_invalid_record(state,
"invalid record length at %X/%X: wanted %u, got %u", "invalid record length at %X/%X: wanted %u, got %u",
(uint32) (RecPtr >> 32), (uint32) RecPtr, (uint32) (RecPtr >> 32), (uint32) RecPtr,
(uint32) SizeOfXLogRecord, total_len); (uint32) SizeOfXLogRecord, total_len);
goto err; goto err;
@ -621,7 +621,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
if (record->xl_tot_len < SizeOfXLogRecord) if (record->xl_tot_len < SizeOfXLogRecord)
{ {
report_invalid_record(state, report_invalid_record(state,
"invalid record length at %X/%X: wanted %u, got %u", "invalid record length at %X/%X: wanted %u, got %u",
(uint32) (RecPtr >> 32), (uint32) RecPtr, (uint32) (RecPtr >> 32), (uint32) RecPtr,
(uint32) SizeOfXLogRecord, record->xl_tot_len); (uint32) SizeOfXLogRecord, record->xl_tot_len);
return false; return false;


@ -1792,7 +1792,7 @@ get_object_address_defacl(List *objname, List *objargs, bool missing_ok)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE), (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("unrecognized default ACL object type %c", objtype), errmsg("unrecognized default ACL object type %c", objtype),
errhint("Valid object types are \"r\", \"S\", \"f\", and \"T\"."))); errhint("Valid object types are \"r\", \"S\", \"f\", and \"T\".")));
} }
/* /*


@ -82,9 +82,9 @@ AggregateCreate(const char *aggName,
Form_pg_proc proc; Form_pg_proc proc;
Oid transfn; Oid transfn;
Oid finalfn = InvalidOid; /* can be omitted */ Oid finalfn = InvalidOid; /* can be omitted */
Oid combinefn = InvalidOid; /* can be omitted */ Oid combinefn = InvalidOid; /* can be omitted */
Oid serialfn = InvalidOid; /* can be omitted */ Oid serialfn = InvalidOid; /* can be omitted */
Oid deserialfn = InvalidOid; /* can be omitted */ Oid deserialfn = InvalidOid; /* can be omitted */
Oid mtransfn = InvalidOid; /* can be omitted */ Oid mtransfn = InvalidOid; /* can be omitted */
Oid minvtransfn = InvalidOid; /* can be omitted */ Oid minvtransfn = InvalidOid; /* can be omitted */
Oid mfinalfn = InvalidOid; /* can be omitted */ Oid mfinalfn = InvalidOid; /* can be omitted */
@ -407,11 +407,11 @@ AggregateCreate(const char *aggName,
/* handle the combinefn, if supplied */ /* handle the combinefn, if supplied */
if (aggcombinefnName) if (aggcombinefnName)
{ {
Oid combineType; Oid combineType;
/* /*
* Combine function must have 2 argument, each of which is the * Combine function must have 2 argument, each of which is the trans
* trans type * type
*/ */
fnArgs[0] = aggTransType; fnArgs[0] = aggTransType;
fnArgs[1] = aggTransType; fnArgs[1] = aggTransType;
@ -423,9 +423,9 @@ AggregateCreate(const char *aggName,
if (combineType != aggTransType) if (combineType != aggTransType)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH), (errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("return type of combine function %s is not %s", errmsg("return type of combine function %s is not %s",
NameListToString(aggcombinefnName), NameListToString(aggcombinefnName),
format_type_be(aggTransType)))); format_type_be(aggTransType))));
/* /*
* A combine function to combine INTERNAL states must accept nulls and * A combine function to combine INTERNAL states must accept nulls and
@ -440,8 +440,9 @@ AggregateCreate(const char *aggName,
} }
/* /*
* Validate the serialization function, if present. We must ensure that the * Validate the serialization function, if present. We must ensure that
* return type of this function is the same as the specified serialType. * the return type of this function is the same as the specified
* serialType.
*/ */
if (aggserialfnName) if (aggserialfnName)
{ {
@ -454,9 +455,9 @@ AggregateCreate(const char *aggName,
if (rettype != aggSerialType) if (rettype != aggSerialType)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH), (errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("return type of serialization function %s is not %s", errmsg("return type of serialization function %s is not %s",
NameListToString(aggserialfnName), NameListToString(aggserialfnName),
format_type_be(aggSerialType)))); format_type_be(aggSerialType))));
} }
/* /*
@ -474,9 +475,9 @@ AggregateCreate(const char *aggName,
if (rettype != aggTransType) if (rettype != aggTransType)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH), (errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("return type of deserialization function %s is not %s", errmsg("return type of deserialization function %s is not %s",
NameListToString(aggdeserialfnName), NameListToString(aggdeserialfnName),
format_type_be(aggTransType)))); format_type_be(aggTransType))));
} }
/* /*


@ -338,14 +338,14 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
/* /*
* There's little point in having a serialization/deserialization * There's little point in having a serialization/deserialization
* function on aggregates that don't have an internal state, so let's * function on aggregates that don't have an internal state, so let's
* just disallow this as it may help clear up any confusion or needless * just disallow this as it may help clear up any confusion or
* authoring of these functions. * needless authoring of these functions.
*/ */
if (transTypeId != INTERNALOID) if (transTypeId != INTERNALOID)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("a serialization type must only be specified when the aggregate transition data type is %s", errmsg("a serialization type must only be specified when the aggregate transition data type is %s",
format_type_be(INTERNALOID)))); format_type_be(INTERNALOID))));
serialTypeId = typenameTypeId(NULL, serialType); serialTypeId = typenameTypeId(NULL, serialType);
@ -358,15 +358,15 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
/* /*
* We disallow INTERNAL serialType as the whole point of the * We disallow INTERNAL serialType as the whole point of the
* serialized types is to allow the aggregate state to be output, * serialized types is to allow the aggregate state to be output, and
* and we cannot output INTERNAL. This check, combined with the one * we cannot output INTERNAL. This check, combined with the one above
* above ensures that the trans type and serialization type are not the * ensures that the trans type and serialization type are not the
* same. * same.
*/ */
if (serialTypeId == INTERNALOID) if (serialTypeId == INTERNALOID)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("aggregate serialization data type cannot be %s", errmsg("aggregate serialization data type cannot be %s",
format_type_be(serialTypeId)))); format_type_be(serialTypeId))));
/* /*
@ -392,14 +392,14 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
*/ */
if (serialfuncName != NIL) if (serialfuncName != NIL)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("must specify serialization type when specifying serialization function"))); errmsg("must specify serialization type when specifying serialization function")));
/* likewise for the deserialization function */ /* likewise for the deserialization function */
if (deserialfuncName != NIL) if (deserialfuncName != NIL)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("must specify serialization type when specifying deserialization function"))); errmsg("must specify serialization type when specifying deserialization function")));
} }
/* /*
@ -493,7 +493,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
mfinalfuncExtraArgs, mfinalfuncExtraArgs,
sortoperatorName, /* sort operator name */ sortoperatorName, /* sort operator name */
transTypeId, /* transition data type */ transTypeId, /* transition data type */
serialTypeId, /* serialization data type */ serialTypeId, /* serialization data type */
transSpace, /* transition space */ transSpace, /* transition space */
mtransTypeId, /* transition data type */ mtransTypeId, /* transition data type */
mtransSpace, /* transition space */ mtransSpace, /* transition space */


@ -400,18 +400,17 @@ ExecRenameStmt(RenameStmt *stmt)
ObjectAddress ObjectAddress
ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddress) ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddress)
{ {
ObjectAddress address; ObjectAddress address;
ObjectAddress refAddr; ObjectAddress refAddr;
Relation rel; Relation rel;
address = address =
get_object_address_rv(stmt->objectType, stmt->relation, stmt->objname, get_object_address_rv(stmt->objectType, stmt->relation, stmt->objname,
stmt->objargs, &rel, AccessExclusiveLock, false); stmt->objargs, &rel, AccessExclusiveLock, false);
/* /*
* If a relation was involved, it would have been opened and locked. * If a relation was involved, it would have been opened and locked. We
* We don't need the relation here, but we'll retain the lock until * don't need the relation here, but we'll retain the lock until commit.
* commit.
*/ */
if (rel) if (rel)
heap_close(rel, NoLock); heap_close(rel, NoLock);
@ -630,8 +629,8 @@ AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid)
oldNspOid = DatumGetObjectId(namespace); oldNspOid = DatumGetObjectId(namespace);
/* /*
* If the object is already in the correct namespace, we don't need * If the object is already in the correct namespace, we don't need to do
* to do anything except fire the object access hook. * anything except fire the object access hook.
*/ */
if (oldNspOid == nspOid) if (oldNspOid == nspOid)
{ {


@ -138,7 +138,7 @@ RemoveAccessMethodById(Oid amOid)
/* /*
* get_am_type_oid * get_am_type_oid
* Worker for various get_am_*_oid variants * Worker for various get_am_*_oid variants
* *
* If missing_ok is false, throw an error if access method not found. If * If missing_ok is false, throw an error if access method not found. If
* true, just return InvalidOid. * true, just return InvalidOid.
@ -188,7 +188,7 @@ get_index_am_oid(const char *amname, bool missing_ok)
/* /*
* get_am_oid - given an access method name, look up its OID. * get_am_oid - given an access method name, look up its OID.
* The type is not checked. * The type is not checked.
*/ */
Oid Oid
get_am_oid(const char *amname, bool missing_ok) get_am_oid(const char *amname, bool missing_ok)

View File

@ -570,7 +570,7 @@ do_analyze_rel(Relation onerel, int options, VacuumParams *params,
*/ */
if (!inh) if (!inh)
{ {
BlockNumber relallvisible; BlockNumber relallvisible;
visibilitymap_count(onerel, &relallvisible, NULL); visibilitymap_count(onerel, &relallvisible, NULL);


@ -85,8 +85,8 @@ CreateConversionCommand(CreateConversionStmt *stmt)
if (get_func_rettype(funcoid) != VOIDOID) if (get_func_rettype(funcoid) != VOIDOID)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION), (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("encoding conversion function %s must return type %s", errmsg("encoding conversion function %s must return type %s",
NameListToString(func_name), "void"))); NameListToString(func_name), "void")));
/* Check we have EXECUTE rights for the function */ /* Check we have EXECUTE rights for the function */
aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE); aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);


@ -875,7 +875,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
if (is_from) if (is_from)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("COPY FROM not supported with row-level security"), errmsg("COPY FROM not supported with row-level security"),
errhint("Use INSERT statements instead."))); errhint("Use INSERT statements instead.")));
/* Build target list */ /* Build target list */
@ -1399,16 +1399,16 @@ BeginCopy(bool is_from,
{ {
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("DO INSTEAD NOTHING rules are not supported for COPY"))); errmsg("DO INSTEAD NOTHING rules are not supported for COPY")));
} }
else if (list_length(rewritten) > 1) else if (list_length(rewritten) > 1)
{ {
ListCell *lc; ListCell *lc;
/* examine queries to determine which error message to issue */ /* examine queries to determine which error message to issue */
foreach(lc, rewritten) foreach(lc, rewritten)
{ {
Query *q = (Query *) lfirst(lc); Query *q = (Query *) lfirst(lc);
if (q->querySource == QSRC_QUAL_INSTEAD_RULE) if (q->querySource == QSRC_QUAL_INSTEAD_RULE)
ereport(ERROR, ereport(ERROR,
@ -1417,7 +1417,7 @@ BeginCopy(bool is_from,
if (q->querySource == QSRC_NON_INSTEAD_RULE) if (q->querySource == QSRC_NON_INSTEAD_RULE)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("DO ALSO rules are not supported for the COPY"))); errmsg("DO ALSO rules are not supported for the COPY")));
} }
ereport(ERROR, ereport(ERROR,
@ -1448,8 +1448,8 @@ BeginCopy(bool is_from,
query->commandType == CMD_DELETE); query->commandType == CMD_DELETE);
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("COPY query must have a RETURNING clause"))); errmsg("COPY query must have a RETURNING clause")));
} }
/* plan the query */ /* plan the query */


@ -1419,7 +1419,7 @@ CreateExtensionInternal(CreateExtensionStmt *stmt, List *parents)
CreateExtensionStmt *ces; CreateExtensionStmt *ces;
ListCell *lc; ListCell *lc;
ObjectAddress addr; ObjectAddress addr;
List *cascade_parents; List *cascade_parents;
/* Check extension name validity before trying to cascade */ /* Check extension name validity before trying to cascade */
check_valid_extension_name(curreq); check_valid_extension_name(curreq);


@ -487,7 +487,7 @@ lookup_fdw_handler_func(DefElem *handler)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE), (errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("function %s must return type %s", errmsg("function %s must return type %s",
NameListToString((List *) handler->arg), "fdw_handler"))); NameListToString((List *) handler->arg), "fdw_handler")));
return handlerOid; return handlerOid;
} }


@ -217,21 +217,20 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
RelationGetRelationName(matviewRel)); RelationGetRelationName(matviewRel));
/* /*
* Check that there is a unique index with no WHERE clause on * Check that there is a unique index with no WHERE clause on one or more
* one or more columns of the materialized view if CONCURRENTLY * columns of the materialized view if CONCURRENTLY is specified.
* is specified.
*/ */
if (concurrent) if (concurrent)
{ {
List *indexoidlist = RelationGetIndexList(matviewRel); List *indexoidlist = RelationGetIndexList(matviewRel);
ListCell *indexoidscan; ListCell *indexoidscan;
bool hasUniqueIndex = false; bool hasUniqueIndex = false;
foreach(indexoidscan, indexoidlist) foreach(indexoidscan, indexoidlist)
{ {
Oid indexoid = lfirst_oid(indexoidscan); Oid indexoid = lfirst_oid(indexoidscan);
Relation indexRel; Relation indexRel;
Form_pg_index indexStruct; Form_pg_index indexStruct;
indexRel = index_open(indexoid, AccessShareLock); indexRel = index_open(indexoid, AccessShareLock);
indexStruct = indexRel->rd_index; indexStruct = indexRel->rd_index;
@ -255,9 +254,9 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
if (!hasUniqueIndex) if (!hasUniqueIndex)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("cannot refresh materialized view \"%s\" concurrently", errmsg("cannot refresh materialized view \"%s\" concurrently",
quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)), quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)),
RelationGetRelationName(matviewRel))), RelationGetRelationName(matviewRel))),
errhint("Create a unique index with no WHERE clause on one or more columns of the materialized view."))); errhint("Create a unique index with no WHERE clause on one or more columns of the materialized view.")));
} }
@ -745,8 +744,8 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
/* /*
* There must be at least one unique index on the matview. * There must be at least one unique index on the matview.
* *
* ExecRefreshMatView() checks that after taking the exclusive lock on * ExecRefreshMatView() checks that after taking the exclusive lock on the
* the matview. So at least one unique index is guaranteed to exist here * matview. So at least one unique index is guaranteed to exist here
* because the lock is still being held. * because the lock is still being held.
*/ */
Assert(foundUniqueIndex); Assert(foundUniqueIndex);
@ -275,8 +275,8 @@ ValidateRestrictionEstimator(List *restrictionName)
if (get_func_rettype(restrictionOid) != FLOAT8OID) if (get_func_rettype(restrictionOid) != FLOAT8OID)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION), (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("restriction estimator function %s must return type %s", errmsg("restriction estimator function %s must return type %s",
NameListToString(restrictionName), "float8"))); NameListToString(restrictionName), "float8")));
/* Require EXECUTE rights for the estimator */ /* Require EXECUTE rights for the estimator */
aclresult = pg_proc_aclcheck(restrictionOid, GetUserId(), ACL_EXECUTE); aclresult = pg_proc_aclcheck(restrictionOid, GetUserId(), ACL_EXECUTE);
@ -321,8 +321,8 @@ ValidateJoinEstimator(List *joinName)
if (get_func_rettype(joinOid) != FLOAT8OID) if (get_func_rettype(joinOid) != FLOAT8OID)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION), (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("join estimator function %s must return type %s", errmsg("join estimator function %s must return type %s",
NameListToString(joinName), "float8"))); NameListToString(joinName), "float8")));
/* Require EXECUTE rights for the estimator */ /* Require EXECUTE rights for the estimator */
aclresult = pg_proc_aclcheck(joinOid, GetUserId(), ACL_EXECUTE); aclresult = pg_proc_aclcheck(joinOid, GetUserId(), ACL_EXECUTE);
@ -496,7 +496,7 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
/* Must own relation. */ /* Must own relation. */
if (pg_class_ownercheck(relid, GetUserId())) if (pg_class_ownercheck(relid, GetUserId()))
noperm = false; /* user is allowed to modify this policy */ noperm = false; /* user is allowed to modify this policy */
else else
ereport(WARNING, ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED), (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED),
@ -511,15 +511,16 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
*/ */
if (!noperm && num_roles > 0) if (!noperm && num_roles > 0)
{ {
int i, j; int i,
j;
Oid *roles = (Oid *) ARR_DATA_PTR(policy_roles); Oid *roles = (Oid *) ARR_DATA_PTR(policy_roles);
Datum *role_oids; Datum *role_oids;
char *qual_value; char *qual_value;
Node *qual_expr; Node *qual_expr;
List *qual_parse_rtable = NIL; List *qual_parse_rtable = NIL;
char *with_check_value; char *with_check_value;
Node *with_check_qual; Node *with_check_qual;
List *with_check_parse_rtable = NIL; List *with_check_parse_rtable = NIL;
Datum values[Natts_pg_policy]; Datum values[Natts_pg_policy];
bool isnull[Natts_pg_policy]; bool isnull[Natts_pg_policy];
bool replaces[Natts_pg_policy]; bool replaces[Natts_pg_policy];
@ -536,15 +537,14 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
/* /*
* All of the dependencies will be removed from the policy and then * All of the dependencies will be removed from the policy and then
* re-added. In order to get them correct, we need to extract out * re-added. In order to get them correct, we need to extract out the
* the expressions in the policy and construct a parsestate just * expressions in the policy and construct a parsestate just enough to
* enough to build the range table(s) to then pass to * build the range table(s) to then pass to recordDependencyOnExpr().
* recordDependencyOnExpr().
*/ */
/* Get policy qual, to update dependencies */ /* Get policy qual, to update dependencies */
value_datum = heap_getattr(tuple, Anum_pg_policy_polqual, value_datum = heap_getattr(tuple, Anum_pg_policy_polqual,
RelationGetDescr(pg_policy_rel), &attr_isnull); RelationGetDescr(pg_policy_rel), &attr_isnull);
if (!attr_isnull) if (!attr_isnull)
{ {
ParseState *qual_pstate; ParseState *qual_pstate;
@ -566,7 +566,7 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
/* Get WITH CHECK qual, to update dependencies */ /* Get WITH CHECK qual, to update dependencies */
value_datum = heap_getattr(tuple, Anum_pg_policy_polwithcheck, value_datum = heap_getattr(tuple, Anum_pg_policy_polwithcheck,
RelationGetDescr(pg_policy_rel), &attr_isnull); RelationGetDescr(pg_policy_rel), &attr_isnull);
if (!attr_isnull) if (!attr_isnull)
{ {
ParseState *with_check_pstate; ParseState *with_check_pstate;
@ -665,7 +665,7 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
heap_close(pg_policy_rel, RowExclusiveLock); heap_close(pg_policy_rel, RowExclusiveLock);
return(noperm || num_roles > 0); return (noperm || num_roles > 0);
} }
/* /*
@ -996,8 +996,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
/* Get policy command */ /* Get policy command */
polcmd_datum = heap_getattr(policy_tuple, Anum_pg_policy_polcmd, polcmd_datum = heap_getattr(policy_tuple, Anum_pg_policy_polcmd,
RelationGetDescr(pg_policy_rel), RelationGetDescr(pg_policy_rel),
&polcmd_isnull); &polcmd_isnull);
Assert(!polcmd_isnull); Assert(!polcmd_isnull);
polcmd = DatumGetChar(polcmd_datum); polcmd = DatumGetChar(polcmd_datum);
@ -1029,15 +1029,15 @@ AlterPolicy(AlterPolicyStmt *stmt)
} }
else else
{ {
Oid *roles; Oid *roles;
Datum roles_datum; Datum roles_datum;
bool attr_isnull; bool attr_isnull;
ArrayType *policy_roles; ArrayType *policy_roles;
/* /*
* We need to pull the set of roles this policy applies to from * We need to pull the set of roles this policy applies to from what's
* what's in the catalog, so that we can recreate the dependencies * in the catalog, so that we can recreate the dependencies correctly
* correctly for the policy. * for the policy.
*/ */
roles_datum = heap_getattr(policy_tuple, Anum_pg_policy_polroles, roles_datum = heap_getattr(policy_tuple, Anum_pg_policy_polroles,
@ -1065,13 +1065,13 @@ AlterPolicy(AlterPolicyStmt *stmt)
} }
else else
{ {
Datum value_datum; Datum value_datum;
bool attr_isnull; bool attr_isnull;
/* /*
* We need to pull the USING expression and build the range table for * We need to pull the USING expression and build the range table for
* the policy from what's in the catalog, so that we can recreate * the policy from what's in the catalog, so that we can recreate the
* the dependencies correctly for the policy. * dependencies correctly for the policy.
*/ */
/* Check if the policy has a USING expr */ /* Check if the policy has a USING expr */
@ -1106,8 +1106,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
} }
else else
{ {
Datum value_datum; Datum value_datum;
bool attr_isnull; bool attr_isnull;
/* /*
* We need to pull the WITH CHECK expression and build the range table * We need to pull the WITH CHECK expression and build the range table
@ -114,8 +114,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (funcrettype != LANGUAGE_HANDLEROID) if (funcrettype != LANGUAGE_HANDLEROID)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE), (errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("function %s must return type %s", errmsg("function %s must return type %s",
NameListToString(funcname), "language_handler"))); NameListToString(funcname), "language_handler")));
} }
else else
{ {
@ -285,8 +285,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
else else
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE), (errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("function %s must return type %s", errmsg("function %s must return type %s",
NameListToString(stmt->plhandler), "language_handler"))); NameListToString(stmt->plhandler), "language_handler")));
} }
/* validate the inline function */ /* validate the inline function */
@ -532,8 +532,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
* can skip this for internally generated triggers, since the name * can skip this for internally generated triggers, since the name
* modification above should be sufficient. * modification above should be sufficient.
* *
* NOTE that this is cool only because we have ShareRowExclusiveLock on the * NOTE that this is cool only because we have ShareRowExclusiveLock on
* relation, so the trigger set won't be changing underneath us. * the relation, so the trigger set won't be changing underneath us.
*/ */
if (!isInternal) if (!isInternal)
{ {
@ -450,8 +450,8 @@ DefineType(List *names, List *parameters)
{ {
/* backwards-compatibility hack */ /* backwards-compatibility hack */
ereport(WARNING, ereport(WARNING,
(errmsg("changing return type of function %s from %s to %s", (errmsg("changing return type of function %s from %s to %s",
NameListToString(inputName), "opaque", typeName))); NameListToString(inputName), "opaque", typeName)));
SetFunctionReturnType(inputOid, typoid); SetFunctionReturnType(inputOid, typoid);
} }
else else
@ -467,15 +467,15 @@ DefineType(List *names, List *parameters)
{ {
/* backwards-compatibility hack */ /* backwards-compatibility hack */
ereport(WARNING, ereport(WARNING,
(errmsg("changing return type of function %s from %s to %s", (errmsg("changing return type of function %s from %s to %s",
NameListToString(outputName), "opaque", "cstring"))); NameListToString(outputName), "opaque", "cstring")));
SetFunctionReturnType(outputOid, CSTRINGOID); SetFunctionReturnType(outputOid, CSTRINGOID);
} }
else else
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION), (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("type output function %s must return type %s", errmsg("type output function %s must return type %s",
NameListToString(outputName), "cstring"))); NameListToString(outputName), "cstring")));
} }
if (receiveOid) if (receiveOid)
{ {
@ -492,8 +492,8 @@ DefineType(List *names, List *parameters)
if (resulttype != BYTEAOID) if (resulttype != BYTEAOID)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION), (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("type send function %s must return type %s", errmsg("type send function %s must return type %s",
NameListToString(sendName), "bytea"))); NameListToString(sendName), "bytea")));
} }
/* /*
@ -1888,8 +1888,8 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid)
if (get_func_rettype(procOid) != BOOLOID) if (get_func_rettype(procOid) != BOOLOID)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION), (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("type analyze function %s must return type %s", errmsg("type analyze function %s must return type %s",
NameListToString(procname), "boolean"))); NameListToString(procname), "boolean")));
return procOid; return procOid;
} }
@ -3313,9 +3313,9 @@ AlterTypeOwner_oid(Oid typeOid, Oid newOwnerId, bool hasDependEntry)
typTup = (Form_pg_type) GETSTRUCT(tup); typTup = (Form_pg_type) GETSTRUCT(tup);
/* /*
* If it's a composite type, invoke ATExecChangeOwner so that we fix up the * If it's a composite type, invoke ATExecChangeOwner so that we fix up
* pg_class entry properly. That will call back to AlterTypeOwnerInternal * the pg_class entry properly. That will call back to
* to take care of the pg_type entry(s). * AlterTypeOwnerInternal to take care of the pg_type entry(s).
*/ */
if (typTup->typtype == TYPTYPE_COMPOSITE) if (typTup->typtype == TYPTYPE_COMPOSITE)
ATExecChangeOwner(typTup->typrelid, newOwnerId, true, AccessExclusiveLock); ATExecChangeOwner(typTup->typrelid, newOwnerId, true, AccessExclusiveLock);
@ -302,7 +302,7 @@ CreateRole(CreateRoleStmt *stmt)
if (!superuser()) if (!superuser())
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change bypassrls attribute"))); errmsg("must be superuser to change bypassrls attribute")));
} }
else else
{ {
@ -320,8 +320,8 @@ CreateRole(CreateRoleStmt *stmt)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME), (errcode(ERRCODE_RESERVED_NAME),
errmsg("role name \"%s\" is reserved", errmsg("role name \"%s\" is reserved",
stmt->role), stmt->role),
errdetail("Role names starting with \"pg_\" are reserved."))); errdetail("Role names starting with \"pg_\" are reserved.")));
/* /*
* Check the pg_authid relation to be certain the role doesn't already * Check the pg_authid relation to be certain the role doesn't already
@ -977,7 +977,7 @@ DropRole(DropRoleStmt *stmt)
if (rolspec->roletype != ROLESPEC_CSTRING) if (rolspec->roletype != ROLESPEC_CSTRING)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE), (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("cannot use special role specifier in DROP ROLE"))); errmsg("cannot use special role specifier in DROP ROLE")));
role = rolspec->rolename; role = rolspec->rolename;
tuple = SearchSysCache1(AUTHNAME, PointerGetDatum(role)); tuple = SearchSysCache1(AUTHNAME, PointerGetDatum(role));
@ -1167,22 +1167,22 @@ RenameRole(const char *oldname, const char *newname)
errmsg("current user cannot be renamed"))); errmsg("current user cannot be renamed")));
/* /*
* Check that the user is not trying to rename a system role and * Check that the user is not trying to rename a system role and not
* not trying to rename a role into the reserved "pg_" namespace. * trying to rename a role into the reserved "pg_" namespace.
*/ */
if (IsReservedName(NameStr(authform->rolname))) if (IsReservedName(NameStr(authform->rolname)))
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME), (errcode(ERRCODE_RESERVED_NAME),
errmsg("role name \"%s\" is reserved", errmsg("role name \"%s\" is reserved",
NameStr(authform->rolname)), NameStr(authform->rolname)),
errdetail("Role names starting with \"pg_\" are reserved."))); errdetail("Role names starting with \"pg_\" are reserved.")));
if (IsReservedName(newname)) if (IsReservedName(newname))
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME), (errcode(ERRCODE_RESERVED_NAME),
errmsg("role name \"%s\" is reserved", errmsg("role name \"%s\" is reserved",
newname), newname),
errdetail("Role names starting with \"pg_\" are reserved."))); errdetail("Role names starting with \"pg_\" are reserved.")));
/* make sure the new name doesn't exist */ /* make sure the new name doesn't exist */
if (SearchSysCacheExists1(AUTHNAME, CStringGetDatum(newname))) if (SearchSysCacheExists1(AUTHNAME, CStringGetDatum(newname)))
@ -1192,9 +1192,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
} }
/* /*
* If the all-visible page is turned out to be all-frozen but not marked, * If the all-visible page is turned out to be all-frozen but not
* we should so mark it. Note that all_frozen is only valid if all_visible * marked, we should so mark it. Note that all_frozen is only valid
* is true, so we must check both. * if all_visible is true, so we must check both.
*/ */
else if (all_visible_according_to_vm && all_visible && all_frozen && else if (all_visible_according_to_vm && all_visible && all_frozen &&
!VM_ALL_FROZEN(onerel, blkno, &vmbuffer)) !VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
@ -1660,7 +1660,7 @@ should_attempt_truncation(LVRelStats *vacrelstats)
possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages; possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
if (possibly_freeable > 0 && if (possibly_freeable > 0 &&
(possibly_freeable >= REL_TRUNCATE_MINIMUM || (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) && possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
old_snapshot_threshold < 0) old_snapshot_threshold < 0)
return true; return true;
else else
@ -880,9 +880,9 @@ check_role(char **newval, void **extra, GucSource source)
ReleaseSysCache(roleTup); ReleaseSysCache(roleTup);
/* /*
* Verify that session user is allowed to become this role, but * Verify that session user is allowed to become this role, but skip
* skip this in parallel mode, where we must blindly recreate the * this in parallel mode, where we must blindly recreate the parallel
* parallel leader's state. * leader's state.
*/ */
if (!InitializingParallelWorker && if (!InitializingParallelWorker &&
!is_member_of_role(GetSessionUserId(), roleid)) !is_member_of_role(GetSessionUserId(), roleid))
@ -444,10 +444,9 @@ ExecSupportsBackwardScan(Plan *node)
return false; return false;
/* /*
* Parallel-aware nodes return a subset of the tuples in each worker, * Parallel-aware nodes return a subset of the tuples in each worker, and
* and in general we can't expect to have enough bookkeeping state to * in general we can't expect to have enough bookkeeping state to know
* know which ones we returned in this worker as opposed to some other * which ones we returned in this worker as opposed to some other worker.
* worker.
*/ */
if (node->parallel_aware) if (node->parallel_aware)
return false; return false;
@ -725,7 +725,7 @@ retry:
{ {
TransactionId xwait; TransactionId xwait;
ItemPointerData ctid_wait; ItemPointerData ctid_wait;
XLTW_Oper reason_wait; XLTW_Oper reason_wait;
Datum existing_values[INDEX_MAX_KEYS]; Datum existing_values[INDEX_MAX_KEYS];
bool existing_isnull[INDEX_MAX_KEYS]; bool existing_isnull[INDEX_MAX_KEYS];
char *error_new; char *error_new;
@ -1851,25 +1851,25 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
if (wco->polname != NULL) if (wco->polname != NULL)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new row violates row-level security policy \"%s\" for table \"%s\"", errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
wco->polname, wco->relname))); wco->polname, wco->relname)));
else else
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new row violates row-level security policy for table \"%s\"", errmsg("new row violates row-level security policy for table \"%s\"",
wco->relname))); wco->relname)));
break; break;
case WCO_RLS_CONFLICT_CHECK: case WCO_RLS_CONFLICT_CHECK:
if (wco->polname != NULL) if (wco->polname != NULL)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"", errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
wco->polname, wco->relname))); wco->polname, wco->relname)));
else else
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new row violates row-level security policy (USING expression) for table \"%s\"", errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
wco->relname))); wco->relname)));
break; break;
default: default:
elog(ERROR, "unrecognized WCO kind: %u", wco->kind); elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
@ -83,7 +83,7 @@ struct SharedExecutorInstrumentation
typedef struct ExecParallelEstimateContext typedef struct ExecParallelEstimateContext
{ {
ParallelContext *pcxt; ParallelContext *pcxt;
int nnodes; int nnodes;
} ExecParallelEstimateContext; } ExecParallelEstimateContext;
/* Context object for ExecParallelInitializeDSM. */ /* Context object for ExecParallelInitializeDSM. */
@ -91,7 +91,7 @@ typedef struct ExecParallelInitializeDSMContext
{ {
ParallelContext *pcxt; ParallelContext *pcxt;
SharedExecutorInstrumentation *instrumentation; SharedExecutorInstrumentation *instrumentation;
int nnodes; int nnodes;
} ExecParallelInitializeDSMContext; } ExecParallelInitializeDSMContext;
/* Helper functions that run in the parallel leader. */ /* Helper functions that run in the parallel leader. */
@ -99,11 +99,11 @@ static char *ExecSerializePlan(Plan *plan, EState *estate);
static bool ExecParallelEstimate(PlanState *node, static bool ExecParallelEstimate(PlanState *node,
ExecParallelEstimateContext *e); ExecParallelEstimateContext *e);
static bool ExecParallelInitializeDSM(PlanState *node, static bool ExecParallelInitializeDSM(PlanState *node,
ExecParallelInitializeDSMContext *d); ExecParallelInitializeDSMContext *d);
static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt, static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt,
bool reinitialize); bool reinitialize);
static bool ExecParallelRetrieveInstrumentation(PlanState *planstate, static bool ExecParallelRetrieveInstrumentation(PlanState *planstate,
SharedExecutorInstrumentation *instrumentation); SharedExecutorInstrumentation *instrumentation);
/* Helper functions that run in the parallel worker. */ /* Helper functions that run in the parallel worker. */
static void ParallelQueryMain(dsm_segment *seg, shm_toc *toc); static void ParallelQueryMain(dsm_segment *seg, shm_toc *toc);
@ -387,12 +387,12 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
/* Estimate space for tuple queues. */ /* Estimate space for tuple queues. */
shm_toc_estimate_chunk(&pcxt->estimator, shm_toc_estimate_chunk(&pcxt->estimator,
mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers)); mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
shm_toc_estimate_keys(&pcxt->estimator, 1); shm_toc_estimate_keys(&pcxt->estimator, 1);
/* /*
* Give parallel-aware nodes a chance to add to the estimates, and get * Give parallel-aware nodes a chance to add to the estimates, and get a
* a count of how many PlanState nodes there are. * count of how many PlanState nodes there are.
*/ */
e.pcxt = pcxt; e.pcxt = pcxt;
e.nnodes = 0; e.nnodes = 0;
@ -444,14 +444,14 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false); pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
/* /*
* If instrumentation options were supplied, allocate space for the * If instrumentation options were supplied, allocate space for the data.
* data. It only gets partially initialized here; the rest happens * It only gets partially initialized here; the rest happens during
* during ExecParallelInitializeDSM. * ExecParallelInitializeDSM.
*/ */
if (estate->es_instrument) if (estate->es_instrument)
{ {
Instrumentation *instrument; Instrumentation *instrument;
int i; int i;
instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len); instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len);
instrumentation->instrument_options = estate->es_instrument; instrumentation->instrument_options = estate->es_instrument;
@ -493,13 +493,13 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
*/ */
static bool static bool
ExecParallelRetrieveInstrumentation(PlanState *planstate, ExecParallelRetrieveInstrumentation(PlanState *planstate,
SharedExecutorInstrumentation *instrumentation) SharedExecutorInstrumentation *instrumentation)
{ {
Instrumentation *instrument; Instrumentation *instrument;
int i; int i;
int n; int n;
int ibytes; int ibytes;
int plan_node_id = planstate->plan->plan_node_id; int plan_node_id = planstate->plan->plan_node_id;
/* Find the instumentation for this node. */ /* Find the instumentation for this node. */
for (i = 0; i < instrumentation->num_plan_nodes; ++i) for (i = 0; i < instrumentation->num_plan_nodes; ++i)
@ -532,7 +532,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
void void
ExecParallelFinish(ParallelExecutorInfo *pei) ExecParallelFinish(ParallelExecutorInfo *pei)
{ {
int i; int i;
if (pei->finished) if (pei->finished)
return; return;
@ -626,19 +626,19 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
*/ */
static bool static bool
ExecParallelReportInstrumentation(PlanState *planstate, ExecParallelReportInstrumentation(PlanState *planstate,
SharedExecutorInstrumentation *instrumentation) SharedExecutorInstrumentation *instrumentation)
{ {
int i; int i;
int plan_node_id = planstate->plan->plan_node_id; int plan_node_id = planstate->plan->plan_node_id;
Instrumentation *instrument; Instrumentation *instrument;
InstrEndLoop(planstate->instrument); InstrEndLoop(planstate->instrument);
/* /*
* If we shuffled the plan_node_id values in ps_instrument into sorted * If we shuffled the plan_node_id values in ps_instrument into sorted
* order, we could use binary search here. This might matter someday * order, we could use binary search here. This might matter someday if
* if we're pushing down sufficiently large plan trees. For now, do it * we're pushing down sufficiently large plan trees. For now, do it the
* the slow, dumb way. * slow, dumb way.
*/ */
for (i = 0; i < instrumentation->num_plan_nodes; ++i) for (i = 0; i < instrumentation->num_plan_nodes; ++i)
if (instrumentation->plan_node_id[i] == plan_node_id) if (instrumentation->plan_node_id[i] == plan_node_id)
@ -497,8 +497,8 @@ init_execution_state(List *queryTree_list,
stmt = queryTree->utilityStmt; stmt = queryTree->utilityStmt;
else else
stmt = (Node *) pg_plan_query(queryTree, stmt = (Node *) pg_plan_query(queryTree,
fcache->readonly_func ? CURSOR_OPT_PARALLEL_OK : 0, fcache->readonly_func ? CURSOR_OPT_PARALLEL_OK : 0,
NULL); NULL);
/* Precheck all commands for validity in a function */ /* Precheck all commands for validity in a function */
if (IsA(stmt, TransactionStmt)) if (IsA(stmt, TransactionStmt))
@ -491,9 +491,9 @@ static void finalize_aggregate(AggState *aggstate,
AggStatePerGroup pergroupstate, AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull); Datum *resultVal, bool *resultIsNull);
static void finalize_partialaggregate(AggState *aggstate, static void finalize_partialaggregate(AggState *aggstate,
AggStatePerAgg peragg, AggStatePerAgg peragg,
AggStatePerGroup pergroupstate, AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull); Datum *resultVal, bool *resultIsNull);
static void prepare_projection_slot(AggState *aggstate, static void prepare_projection_slot(AggState *aggstate,
TupleTableSlot *slot, TupleTableSlot *slot,
int currentSet); int currentSet);
@ -981,17 +981,18 @@ combine_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
if (OidIsValid(pertrans->deserialfn_oid)) if (OidIsValid(pertrans->deserialfn_oid))
{ {
/* /*
* Don't call a strict deserialization function with NULL input. * Don't call a strict deserialization function with NULL input. A
* A strict deserialization function and a null value means we skip * strict deserialization function and a null value means we skip
* calling the combine function for this state. We assume that this * calling the combine function for this state. We assume that
* would be a waste of time and effort anyway so just skip it. * this would be a waste of time and effort anyway so just skip
* it.
*/ */
if (pertrans->deserialfn.fn_strict && slot->tts_isnull[0]) if (pertrans->deserialfn.fn_strict && slot->tts_isnull[0])
continue; continue;
else else
{ {
FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo; FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo;
MemoryContext oldContext; MemoryContext oldContext;
dsinfo->arg[0] = slot->tts_values[0]; dsinfo->arg[0] = slot->tts_values[0];
dsinfo->argnull[0] = slot->tts_isnull[0]; dsinfo->argnull[0] = slot->tts_isnull[0];
@ -1423,14 +1424,14 @@ finalize_partialaggregate(AggState *aggstate,
AggStatePerGroup pergroupstate, AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull) Datum *resultVal, bool *resultIsNull)
{ {
AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno]; AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
MemoryContext oldContext; MemoryContext oldContext;
oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory); oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
/* /*
* serialfn_oid will be set if we must serialize the input state * serialfn_oid will be set if we must serialize the input state before
* before calling the combine function on the state. * calling the combine function on the state.
*/ */
if (OidIsValid(pertrans->serialfn_oid)) if (OidIsValid(pertrans->serialfn_oid))
{ {
@ -1443,6 +1444,7 @@ finalize_partialaggregate(AggState *aggstate,
else else
{ {
FunctionCallInfo fcinfo = &pertrans->serialfn_fcinfo; FunctionCallInfo fcinfo = &pertrans->serialfn_fcinfo;
fcinfo->arg[0] = pergroupstate->transValue; fcinfo->arg[0] = pergroupstate->transValue;
fcinfo->argnull[0] = pergroupstate->transValueIsNull; fcinfo->argnull[0] = pergroupstate->transValueIsNull;
@ -1459,7 +1461,7 @@ finalize_partialaggregate(AggState *aggstate,
/* If result is pass-by-ref, make sure it is in the right context. */ /* If result is pass-by-ref, make sure it is in the right context. */
if (!peragg->resulttypeByVal && !*resultIsNull && if (!peragg->resulttypeByVal && !*resultIsNull &&
!MemoryContextContains(CurrentMemoryContext, !MemoryContextContains(CurrentMemoryContext,
DatumGetPointer(*resultVal))) DatumGetPointer(*resultVal)))
*resultVal = datumCopy(*resultVal, *resultVal = datumCopy(*resultVal,
peragg->resulttypeByVal, peragg->resulttypeByVal,
peragg->resulttypeLen); peragg->resulttypeLen);
@ -2627,21 +2629,21 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* *
* 1. An aggregate function appears more than once in query: * 1. An aggregate function appears more than once in query:
* *
* SELECT SUM(x) FROM ... HAVING SUM(x) > 0 * SELECT SUM(x) FROM ... HAVING SUM(x) > 0
* *
* Since the aggregates are the identical, we only need to calculate * Since the aggregates are the identical, we only need to calculate
* the calculate it once. Both aggregates will share the same 'aggno' * the calculate it once. Both aggregates will share the same 'aggno'
* value. * value.
* *
* 2. Two different aggregate functions appear in the query, but the * 2. Two different aggregate functions appear in the query, but the
* aggregates have the same transition function and initial value, but * aggregates have the same transition function and initial value, but
* different final function: * different final function:
* *
* SELECT SUM(x), AVG(x) FROM ... * SELECT SUM(x), AVG(x) FROM ...
* *
* In this case we must create a new peragg for the varying aggregate, * In this case we must create a new peragg for the varying aggregate,
* and need to call the final functions separately, but can share the * and need to call the final functions separately, but can share the
* same transition state. * same transition state.
* *
* For either of these optimizations to be valid, the aggregate's * For either of these optimizations to be valid, the aggregate's
* arguments must be the same, including any modifiers such as ORDER BY, * arguments must be the same, including any modifiers such as ORDER BY,
@ -2889,8 +2891,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
*/ */
existing_transno = find_compatible_pertrans(aggstate, aggref, existing_transno = find_compatible_pertrans(aggstate, aggref,
transfn_oid, aggtranstype, transfn_oid, aggtranstype,
serialfn_oid, deserialfn_oid, serialfn_oid, deserialfn_oid,
initValue, initValueIsNull, initValue, initValueIsNull,
same_input_transnos); same_input_transnos);
if (existing_transno != -1) if (existing_transno != -1)
{ {
@ -3366,9 +3368,9 @@ find_compatible_pertrans(AggState *aggstate, Aggref *newagg,
/* /*
* The serialization and deserialization functions must match, if * The serialization and deserialization functions must match, if
* present, as we're unable to share the trans state for aggregates * present, as we're unable to share the trans state for aggregates
* which will serialize or deserialize into different formats. Remember * which will serialize or deserialize into different formats.
* that these will be InvalidOid if they're not required for this agg * Remember that these will be InvalidOid if they're not required for
* node. * this agg node.
*/ */
if (aggserialfn != pertrans->serialfn_oid || if (aggserialfn != pertrans->serialfn_oid ||
aggdeserialfn != pertrans->deserialfn_oid) aggdeserialfn != pertrans->deserialfn_oid)
@ -285,8 +285,8 @@ ExecReScanForeignScan(ForeignScanState *node)
/* /*
* If chgParam of subnode is not null then plan will be re-scanned by * If chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode. outerPlan may also be NULL, in which case there * first ExecProcNode. outerPlan may also be NULL, in which case there is
* is nothing to rescan at all. * nothing to rescan at all.
*/ */
if (outerPlan != NULL && outerPlan->chgParam == NULL) if (outerPlan != NULL && outerPlan->chgParam == NULL)
ExecReScan(outerPlan); ExecReScan(outerPlan);
@ -138,8 +138,8 @@ ExecGather(GatherState *node)
/* /*
* Initialize the parallel context and workers on first execution. We do * Initialize the parallel context and workers on first execution. We do
* this on first execution rather than during node initialization, as it * this on first execution rather than during node initialization, as it
* needs to allocate large dynamic segment, so it is better to do if it * needs to allocate large dynamic segment, so it is better to do if it is
* is really needed. * really needed.
*/ */
if (!node->initialized) if (!node->initialized)
{ {
@ -147,8 +147,8 @@ ExecGather(GatherState *node)
Gather *gather = (Gather *) node->ps.plan; Gather *gather = (Gather *) node->ps.plan;
/* /*
* Sometimes we might have to run without parallelism; but if * Sometimes we might have to run without parallelism; but if parallel
* parallel mode is active then we can try to fire up some workers. * mode is active then we can try to fire up some workers.
*/ */
if (gather->num_workers > 0 && IsInParallelMode()) if (gather->num_workers > 0 && IsInParallelMode())
{ {
@ -186,7 +186,7 @@ ExecGather(GatherState *node)
} }
else else
{ {
/* No workers? Then never mind. */ /* No workers? Then never mind. */
ExecShutdownGatherWorkers(node); ExecShutdownGatherWorkers(node);
} }
} }
@ -314,7 +314,7 @@ gather_getnext(GatherState *gatherstate)
static HeapTuple static HeapTuple
gather_readnext(GatherState *gatherstate) gather_readnext(GatherState *gatherstate)
{ {
int waitpos = gatherstate->nextreader; int waitpos = gatherstate->nextreader;
for (;;) for (;;)
{ {
@ -330,8 +330,8 @@ gather_readnext(GatherState *gatherstate)
tup = TupleQueueReaderNext(reader, true, &readerdone); tup = TupleQueueReaderNext(reader, true, &readerdone);
/* /*
* If this reader is done, remove it. If all readers are done, * If this reader is done, remove it. If all readers are done, clean
* clean up remaining worker state. * up remaining worker state.
*/ */
if (readerdone) if (readerdone)
{ {
@ -402,7 +402,7 @@ ExecShutdownGatherWorkers(GatherState *node)
/* Shut down tuple queue readers before shutting down workers. */ /* Shut down tuple queue readers before shutting down workers. */
if (node->reader != NULL) if (node->reader != NULL)
{ {
int i; int i;
for (i = 0; i < node->nreaders; ++i) for (i = 0; i < node->nreaders; ++i)
DestroyTupleQueueReader(node->reader[i]); DestroyTupleQueueReader(node->reader[i]);
@ -452,10 +452,10 @@ void
ExecReScanGather(GatherState *node) ExecReScanGather(GatherState *node)
{ {
/* /*
* Re-initialize the parallel workers to perform rescan of relation. * Re-initialize the parallel workers to perform rescan of relation. We
* We want to gracefully shutdown all the workers so that they * want to gracefully shutdown all the workers so that they should be able
* should be able to propagate any error or other information to master * to propagate any error or other information to master backend before
* backend before dying. Parallel context will be reused for rescan. * dying. Parallel context will be reused for rescan.
*/ */
ExecShutdownGatherWorkers(node); ExecShutdownGatherWorkers(node);
@ -1221,10 +1221,10 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
/* /*
* Note that it is possible that the target tuple has been modified in * Note that it is possible that the target tuple has been modified in
* this session, after the above heap_lock_tuple. We choose to not error * this session, after the above heap_lock_tuple. We choose to not error
* out in that case, in line with ExecUpdate's treatment of similar * out in that case, in line with ExecUpdate's treatment of similar cases.
* cases. This can happen if an UPDATE is triggered from within * This can happen if an UPDATE is triggered from within ExecQual(),
* ExecQual(), ExecWithCheckOptions() or ExecProject() above, e.g. by * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
* selecting from a wCTE in the ON CONFLICT's SET. * wCTE in the ON CONFLICT's SET.
*/ */
/* Execute UPDATE with projection */ /* Execute UPDATE with projection */
@ -1595,7 +1595,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/* Initialize the usesFdwDirectModify flag */ /* Initialize the usesFdwDirectModify flag */
resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i, resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
node->fdwDirectModifyPlans); node->fdwDirectModifyPlans);
/* /*
* Verify result relation is a valid target for the current operation * Verify result relation is a valid target for the current operation
@ -65,8 +65,8 @@ SeqNext(SeqScanState *node)
if (scandesc == NULL) if (scandesc == NULL)
{ {
/* /*
* We reach here if the scan is not parallel, or if we're executing * We reach here if the scan is not parallel, or if we're executing a
* a scan that was intended to be parallel serially. * scan that was intended to be parallel serially.
*/ */
scandesc = heap_beginscan(node->ss.ss_currentRelation, scandesc = heap_beginscan(node->ss.ss_currentRelation,
estate->es_snapshot, estate->es_snapshot,
@ -145,7 +145,7 @@ InitScanRelation(SeqScanState *node, EState *estate, int eflags)
* open that relation and acquire appropriate lock on it. * open that relation and acquire appropriate lock on it.
*/ */
currentRelation = ExecOpenScanRelation(estate, currentRelation = ExecOpenScanRelation(estate,
((SeqScan *) node->ss.ps.plan)->scanrelid, ((SeqScan *) node->ss.ps.plan)->scanrelid,
eflags); eflags);
node->ss.ss_currentRelation = currentRelation; node->ss.ss_currentRelation = currentRelation;
@ -277,8 +277,8 @@ ExecReScanSeqScan(SeqScanState *node)
scan = node->ss.ss_currentScanDesc; scan = node->ss.ss_currentScanDesc;
if (scan != NULL) if (scan != NULL)
heap_rescan(scan, /* scan desc */ heap_rescan(scan, /* scan desc */
NULL); /* new scan keys */ NULL); /* new scan keys */
ExecScanReScan((ScanState *) node); ExecScanReScan((ScanState *) node);
} }
@ -316,7 +316,7 @@ ExecSeqScanInitializeDSM(SeqScanState *node,
ParallelContext *pcxt) ParallelContext *pcxt)
{ {
EState *estate = node->ss.ps.state; EState *estate = node->ss.ps.state;
ParallelHeapScanDesc pscan; ParallelHeapScanDesc pscan;
pscan = shm_toc_allocate(pcxt->toc, node->pscan_len); pscan = shm_toc_allocate(pcxt->toc, node->pscan_len);
heap_parallelscan_initialize(pscan, heap_parallelscan_initialize(pscan,
@ -336,7 +336,7 @@ ExecSeqScanInitializeDSM(SeqScanState *node,
void void
ExecSeqScanInitializeWorker(SeqScanState *node, shm_toc *toc) ExecSeqScanInitializeWorker(SeqScanState *node, shm_toc *toc)
{ {
ParallelHeapScanDesc pscan; ParallelHeapScanDesc pscan;
pscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id); pscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
node->ss.ss_currentScanDesc = node->ss.ss_currentScanDesc =
@ -2220,8 +2220,8 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
/* build expression trees using actual argument & result types */ /* build expression trees using actual argument & result types */
build_aggregate_transfn_expr(inputTypes, build_aggregate_transfn_expr(inputTypes,
numArguments, numArguments,
0, /* no ordered-set window functions yet */ 0, /* no ordered-set window functions yet */
false, /* no variadic window functions yet */ false, /* no variadic window functions yet */
wfunc->wintype, wfunc->wintype,
wfunc->inputcollid, wfunc->inputcollid,
transfn_oid, transfn_oid,
@ -44,13 +44,13 @@ typedef enum
TQUEUE_REMAP_ARRAY, /* array */ TQUEUE_REMAP_ARRAY, /* array */
TQUEUE_REMAP_RANGE, /* range */ TQUEUE_REMAP_RANGE, /* range */
TQUEUE_REMAP_RECORD /* composite type, named or anonymous */ TQUEUE_REMAP_RECORD /* composite type, named or anonymous */
} RemapClass; } RemapClass;
typedef struct typedef struct
{ {
int natts; int natts;
RemapClass mapping[FLEXIBLE_ARRAY_MEMBER]; RemapClass mapping[FLEXIBLE_ARRAY_MEMBER];
} RemapInfo; } RemapInfo;
typedef struct typedef struct
{ {
@ -61,13 +61,13 @@ typedef struct
char mode; char mode;
TupleDesc tupledesc; TupleDesc tupledesc;
RemapInfo *remapinfo; RemapInfo *remapinfo;
} TQueueDestReceiver; } TQueueDestReceiver;
typedef struct RecordTypemodMap typedef struct RecordTypemodMap
{ {
int remotetypmod; int remotetypmod;
int localtypmod; int localtypmod;
} RecordTypemodMap; } RecordTypemodMap;
struct TupleQueueReader struct TupleQueueReader
{ {
@ -81,19 +81,19 @@ struct TupleQueueReader
#define TUPLE_QUEUE_MODE_CONTROL 'c' #define TUPLE_QUEUE_MODE_CONTROL 'c'
#define TUPLE_QUEUE_MODE_DATA 'd' #define TUPLE_QUEUE_MODE_DATA 'd'
static void tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, static void tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype,
Datum value); Datum value);
static void tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value); static void tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value);
static void tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value); static void tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value);
static void tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value); static void tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value);
static void tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod, static void tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod,
TupleDesc tupledesc); TupleDesc tupledesc);
static void TupleQueueHandleControlMessage(TupleQueueReader *reader, static void TupleQueueHandleControlMessage(TupleQueueReader *reader,
Size nbytes, char *data); Size nbytes, char *data);
static HeapTuple TupleQueueHandleDataMessage(TupleQueueReader *reader, static HeapTuple TupleQueueHandleDataMessage(TupleQueueReader *reader,
Size nbytes, HeapTupleHeader data); Size nbytes, HeapTupleHeader data);
static HeapTuple TupleQueueRemapTuple(TupleQueueReader *reader, static HeapTuple TupleQueueRemapTuple(TupleQueueReader *reader,
TupleDesc tupledesc, RemapInfo * remapinfo, TupleDesc tupledesc, RemapInfo *remapinfo,
HeapTuple tuple); HeapTuple tuple);
static Datum TupleQueueRemap(TupleQueueReader *reader, RemapClass remapclass, static Datum TupleQueueRemap(TupleQueueReader *reader, RemapClass remapclass,
Datum value); Datum value);
@ -212,7 +212,7 @@ tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self)
* Invoke the appropriate walker function based on the given RemapClass. * Invoke the appropriate walker function based on the given RemapClass.
*/ */
static void static void
tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value) tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype, Datum value)
{ {
check_stack_depth(); check_stack_depth();
@ -237,7 +237,7 @@ tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value)
* contained therein. * contained therein.
*/ */
static void static void
tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value) tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value)
{ {
HeapTupleHeader tup; HeapTupleHeader tup;
Oid typeid; Oid typeid;
@ -304,7 +304,7 @@ tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value)
* contained therein. * contained therein.
*/ */
static void static void
tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value) tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value)
{ {
ArrayType *arr = DatumGetArrayTypeP(value); ArrayType *arr = DatumGetArrayTypeP(value);
Oid typeid = ARR_ELEMTYPE(arr); Oid typeid = ARR_ELEMTYPE(arr);
@ -342,7 +342,7 @@ tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value)
* contained therein. * contained therein.
*/ */
static void static void
tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value) tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value)
{ {
RangeType *range = DatumGetRangeType(value); RangeType *range = DatumGetRangeType(value);
Oid typeid = RangeTypeGetOid(range); Oid typeid = RangeTypeGetOid(range);
@ -386,7 +386,7 @@ tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value)
* already done so previously. * already done so previously.
*/ */
static void static void
tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod, tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod,
TupleDesc tupledesc) TupleDesc tupledesc)
{ {
StringInfoData buf; StringInfoData buf;
@ -613,7 +613,7 @@ TupleQueueHandleDataMessage(TupleQueueReader *reader,
*/ */
static HeapTuple static HeapTuple
TupleQueueRemapTuple(TupleQueueReader *reader, TupleDesc tupledesc, TupleQueueRemapTuple(TupleQueueReader *reader, TupleDesc tupledesc,
RemapInfo * remapinfo, HeapTuple tuple) RemapInfo *remapinfo, HeapTuple tuple)
{ {
Datum *values; Datum *values;
bool *isnull; bool *isnull;
@ -1875,7 +1875,7 @@ CheckPAMAuth(Port *port, char *user, char *password)
retval = pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen, retval = pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen,
hostinfo, sizeof(hostinfo), NULL, 0, hostinfo, sizeof(hostinfo), NULL, 0,
port->hba->pam_use_hostname ? 0 : NI_NUMERICHOST | NI_NUMERICSERV); port->hba->pam_use_hostname ? 0 : NI_NUMERICHOST | NI_NUMERICSERV);
if (retval != 0) if (retval != 0)
{ {
ereport(WARNING, ereport(WARNING,
@ -1934,7 +1934,7 @@ CheckPAMAuth(Port *port, char *user, char *password)
{ {
ereport(LOG, ereport(LOG,
(errmsg("pam_set_item(PAM_RHOST) failed: %s", (errmsg("pam_set_item(PAM_RHOST) failed: %s",
pam_strerror(pamh, retval)))); pam_strerror(pamh, retval))));
pam_passwd = NULL; pam_passwd = NULL;
return STATUS_ERROR; return STATUS_ERROR;
} }
@ -1996,8 +1996,8 @@ CheckPAMAuth(Port *port, char *user, char *password)
static int static int
CheckBSDAuth(Port *port, char *user) CheckBSDAuth(Port *port, char *user)
{ {
char *passwd; char *passwd;
int retval; int retval;
/* Send regular password request to client, and get the response */ /* Send regular password request to client, and get the response */
sendAuthRequest(port, AUTH_REQ_PASSWORD); sendAuthRequest(port, AUTH_REQ_PASSWORD);
@ -2539,11 +2539,10 @@ CheckRADIUSAuth(Port *port)
radius_add_attribute(packet, RADIUS_NAS_IDENTIFIER, (unsigned char *) identifier, strlen(identifier)); radius_add_attribute(packet, RADIUS_NAS_IDENTIFIER, (unsigned char *) identifier, strlen(identifier));
/* /*
* RADIUS password attributes are calculated as: * RADIUS password attributes are calculated as: e[0] = p[0] XOR
* e[0] = p[0] XOR MD5(secret + Request Authenticator) * MD5(secret + Request Authenticator) for the first group of 16 octets,
* for the first group of 16 octets, and then: * and then: e[i] = p[i] XOR MD5(secret + e[i-1]) for the following ones
* e[i] = p[i] XOR MD5(secret + e[i-1]) * (if necessary)
* for the following ones (if necessary)
*/ */
encryptedpasswordlen = ((strlen(passwd) + RADIUS_VECTOR_LENGTH - 1) / RADIUS_VECTOR_LENGTH) * RADIUS_VECTOR_LENGTH; encryptedpasswordlen = ((strlen(passwd) + RADIUS_VECTOR_LENGTH - 1) / RADIUS_VECTOR_LENGTH) * RADIUS_VECTOR_LENGTH;
cryptvector = palloc(strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH); cryptvector = palloc(strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH);
@ -2554,7 +2553,11 @@ CheckRADIUSAuth(Port *port)
for (i = 0; i < encryptedpasswordlen; i += RADIUS_VECTOR_LENGTH) for (i = 0; i < encryptedpasswordlen; i += RADIUS_VECTOR_LENGTH)
{ {
memcpy(cryptvector + strlen(port->hba->radiussecret), md5trailer, RADIUS_VECTOR_LENGTH); memcpy(cryptvector + strlen(port->hba->radiussecret), md5trailer, RADIUS_VECTOR_LENGTH);
/* .. and for subsequent iterations the result of the previous XOR (calculated below) */
/*
* .. and for subsequent iterations the result of the previous XOR
* (calculated below)
*/
md5trailer = encryptedpassword + i; md5trailer = encryptedpassword + i;
if (!pg_md5_binary(cryptvector, strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH, encryptedpassword + i)) if (!pg_md5_binary(cryptvector, strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH, encryptedpassword + i))
@ -2565,7 +2568,7 @@ CheckRADIUSAuth(Port *port)
return STATUS_ERROR; return STATUS_ERROR;
} }
for (j = i; j < i+RADIUS_VECTOR_LENGTH; j++) for (j = i; j < i + RADIUS_VECTOR_LENGTH; j++)
{ {
if (j < strlen(passwd)) if (j < strlen(passwd))
encryptedpassword[j] = passwd[j] ^ encryptedpassword[j]; encryptedpassword[j] = passwd[j] ^ encryptedpassword[j];
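(Editorial aside, not part of the commit: the CheckRADIUSAuth() comment above describes RFC 2865-style password hiding -- e[0] = p[0] XOR MD5(secret || Request Authenticator), then e[i] = p[i] XOR MD5(secret || e[i-1]). Below is a minimal standalone sketch of that chaining only; the helper name and signature are hypothetical, and OpenSSL's MD5() stands in for the backend's pg_md5_binary(). Link with -lcrypto.)

#include <openssl/md5.h>
#include <string.h>

#define RADIUS_VECTOR_LENGTH 16

/*
 * Sketch: hide "passwd" into "encrypted".  enclen must be a multiple of 16
 * and at least strlen(passwd); secret is assumed shorter than 1000 bytes.
 */
static void
radius_hide_password(const char *secret, const unsigned char *authenticator,
                     const char *passwd, unsigned char *encrypted, int enclen)
{
    unsigned char cryptvector[1024];
    unsigned char md5out[MD5_DIGEST_LENGTH];
    const unsigned char *md5trailer = authenticator;
    int         secretlen = strlen(secret);
    int         i,
                j;

    for (i = 0; i < enclen; i += RADIUS_VECTOR_LENGTH)
    {
        /* MD5(secret || authenticator) first, then MD5(secret || e[i-1]) */
        memcpy(cryptvector, secret, secretlen);
        memcpy(cryptvector + secretlen, md5trailer, RADIUS_VECTOR_LENGTH);
        MD5(cryptvector, secretlen + RADIUS_VECTOR_LENGTH, md5out);

        for (j = 0; j < RADIUS_VECTOR_LENGTH; j++)
        {
            unsigned char p = (i + j < (int) strlen(passwd)) ? passwd[i + j] : 0;

            encrypted[i + j] = p ^ md5out[j];
        }
        md5trailer = encrypted + i; /* chain on the block just produced */
    }
}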
@ -241,8 +241,8 @@ be_tls_init(void)
(buf.st_uid == 0 && buf.st_mode & (S_IWGRP | S_IXGRP | S_IRWXO))) (buf.st_uid == 0 && buf.st_mode & (S_IWGRP | S_IXGRP | S_IRWXO)))
ereport(FATAL, ereport(FATAL,
(errcode(ERRCODE_CONFIG_FILE_ERROR), (errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("private key file \"%s\" has group or world access", errmsg("private key file \"%s\" has group or world access",
ssl_key_file), ssl_key_file),
errdetail("File must have permissions u=rw (0600) or less if owned by the database user, or permissions u=rw,g=r (0640) or less if owned by root."))); errdetail("File must have permissions u=rw (0600) or less if owned by the database user, or permissions u=rw,g=r (0640) or less if owned by root.")));
#endif #endif
@ -316,7 +316,7 @@ be_tls_init(void)
else else
ereport(FATAL, ereport(FATAL,
(errmsg("could not load SSL certificate revocation list file \"%s\": %s", (errmsg("could not load SSL certificate revocation list file \"%s\": %s",
ssl_crl_file, SSLerrmessage(ERR_get_error())))); ssl_crl_file, SSLerrmessage(ERR_get_error()))));
} }
} }
@ -377,11 +377,12 @@ be_tls_open_server(Port *port)
port->ssl_in_use = true; port->ssl_in_use = true;
aloop: aloop:
/* /*
* Prepare to call SSL_get_error() by clearing thread's OpenSSL error * Prepare to call SSL_get_error() by clearing thread's OpenSSL error
* queue. In general, the current thread's error queue must be empty * queue. In general, the current thread's error queue must be empty
* before the TLS/SSL I/O operation is attempted, or SSL_get_error() * before the TLS/SSL I/O operation is attempted, or SSL_get_error() will
* will not work reliably. An extension may have failed to clear the * not work reliably. An extension may have failed to clear the
* per-thread error queue following another call to an OpenSSL I/O * per-thread error queue following another call to an OpenSSL I/O
* routine. * routine.
*/ */
@ -393,12 +394,11 @@ aloop:
/* /*
* Other clients of OpenSSL in the backend may fail to call * Other clients of OpenSSL in the backend may fail to call
* ERR_get_error(), but we always do, so as to not cause problems * ERR_get_error(), but we always do, so as to not cause problems for
* for OpenSSL clients that don't call ERR_clear_error() * OpenSSL clients that don't call ERR_clear_error() defensively. Be
* defensively. Be sure that this happens by calling now. * sure that this happens by calling now. SSL_get_error() relies on
* SSL_get_error() relies on the OpenSSL per-thread error queue * the OpenSSL per-thread error queue being intact, so this is the
* being intact, so this is the earliest possible point * earliest possible point ERR_get_error() may be called.
* ERR_get_error() may be called.
*/ */
ecode = ERR_get_error(); ecode = ERR_get_error();
switch (err) switch (err)
@ -140,26 +140,26 @@ retry:
/* In blocking mode, wait until the socket is ready */ /* In blocking mode, wait until the socket is ready */
if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN)) if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN))
{ {
WaitEvent event; WaitEvent event;
Assert(waitfor); Assert(waitfor);
ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL); ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL);
WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */, &event, 1); WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1);
/* /*
* If the postmaster has died, it's not safe to continue running, * If the postmaster has died, it's not safe to continue running,
* because it is the postmaster's job to kill us if some other backend * because it is the postmaster's job to kill us if some other backend
* exists uncleanly. Moreover, we won't run very well in this state; * exists uncleanly. Moreover, we won't run very well in this state;
* helper processes like walwriter and the bgwriter will exit, so * helper processes like walwriter and the bgwriter will exit, so
* performance may be poor. Finally, if we don't exit, pg_ctl will * performance may be poor. Finally, if we don't exit, pg_ctl will be
* be unable to restart the postmaster without manual intervention, * unable to restart the postmaster without manual intervention, so no
* so no new connections can be accepted. Exiting clears the deck * new connections can be accepted. Exiting clears the deck for a
* for a postmaster restart. * postmaster restart.
* *
* (Note that we only make this check when we would otherwise sleep * (Note that we only make this check when we would otherwise sleep on
* on our latch. We might still continue running for a while if the * our latch. We might still continue running for a while if the
* postmaster is killed in mid-query, or even through multiple queries * postmaster is killed in mid-query, or even through multiple queries
* if we never have to wait for read. We don't want to burn too many * if we never have to wait for read. We don't want to burn too many
* cycles checking for this very rare condition, and this should cause * cycles checking for this very rare condition, and this should cause
@ -168,7 +168,7 @@ retry:
if (event.events & WL_POSTMASTER_DEATH) if (event.events & WL_POSTMASTER_DEATH)
ereport(FATAL, ereport(FATAL,
(errcode(ERRCODE_ADMIN_SHUTDOWN), (errcode(ERRCODE_ADMIN_SHUTDOWN),
errmsg("terminating connection due to unexpected postmaster exit"))); errmsg("terminating connection due to unexpected postmaster exit")));
/* Handle interrupt. */ /* Handle interrupt. */
if (event.events & WL_LATCH_SET) if (event.events & WL_LATCH_SET)
@ -241,19 +241,19 @@ retry:
if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN)) if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN))
{ {
WaitEvent event; WaitEvent event;
Assert(waitfor); Assert(waitfor);
ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL); ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL);
WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */, &event, 1); WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1);
/* See comments in secure_read. */ /* See comments in secure_read. */
if (event.events & WL_POSTMASTER_DEATH) if (event.events & WL_POSTMASTER_DEATH)
ereport(FATAL, ereport(FATAL,
(errcode(ERRCODE_ADMIN_SHUTDOWN), (errcode(ERRCODE_ADMIN_SHUTDOWN),
errmsg("terminating connection due to unexpected postmaster exit"))); errmsg("terminating connection due to unexpected postmaster exit")));
/* Handle interrupt. */ /* Handle interrupt. */
if (event.events & WL_LATCH_SET) if (event.events & WL_LATCH_SET)
@ -1174,7 +1174,7 @@ pq_startmsgread(void)
if (PqCommReadingMsg) if (PqCommReadingMsg)
ereport(FATAL, ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION), (errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("terminating connection because protocol synchronization was lost"))); errmsg("terminating connection because protocol synchronization was lost")));
PqCommReadingMsg = true; PqCommReadingMsg = true;
} }
View File
@ -143,9 +143,9 @@ mq_putmessage(char msgtype, const char *s, size_t len)
/* /*
* If the message queue is already gone, just ignore the message. This * If the message queue is already gone, just ignore the message. This
* doesn't necessarily indicate a problem; for example, DEBUG messages * doesn't necessarily indicate a problem; for example, DEBUG messages can
* can be generated late in the shutdown sequence, after all DSMs have * be generated late in the shutdown sequence, after all DSMs have already
* already been detached. * been detached.
*/ */
if (pq_mq == NULL) if (pq_mq == NULL)
return 0; return 0;
View File
@ -270,19 +270,22 @@ startup_hacks(const char *progname)
SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX); SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
#if defined(_M_AMD64) && _MSC_VER == 1800 #if defined(_M_AMD64) && _MSC_VER == 1800
/* /*
* Avoid crashing in certain floating-point operations if * Avoid crashing in certain floating-point operations if we were
* we were compiled for x64 with MS Visual Studio 2013 and * compiled for x64 with MS Visual Studio 2013 and are running on
* are running on Windows prior to 7/2008R2 SP1 on an * Windows prior to 7/2008R2 SP1 on an AVX2-capable CPU.
* AVX2-capable CPU.
* *
* Ref: https://connect.microsoft.com/VisualStudio/feedback/details/811093/visual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instructions * Ref:
* https://connect.microsoft.com/VisualStudio/feedback/details/811093/v
* isual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instruction
* s
*/ */
if (!IsWindows7SP1OrGreater()) if (!IsWindows7SP1OrGreater())
{ {
_set_FMA3_enable(0); _set_FMA3_enable(0);
} }
#endif /* defined(_M_AMD64) && _MSC_VER == 1800 */ #endif /* defined(_M_AMD64) && _MSC_VER == 1800 */
} }
#endif /* WIN32 */ #endif /* WIN32 */
View File
@ -3499,7 +3499,7 @@ planstate_tree_walker(PlanState *planstate,
return true; return true;
break; break;
case T_CustomScan: case T_CustomScan:
foreach (lc, ((CustomScanState *) planstate)->custom_ps) foreach(lc, ((CustomScanState *) planstate)->custom_ps)
{ {
if (walker((PlanState *) lfirst(lc), context)) if (walker((PlanState *) lfirst(lc), context))
return true; return true;
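The hunk above only tweaks foreach spacing, but it shows the shape of planstate_tree_walker(): the callback is applied to every child PlanState, including the custom_ps children of a CustomScanState. A hedged sketch of a typical callback follows; the callback name and the GatherState check are illustrative only, not code from this commit. Starting the walk is just contains_gather_walker(root_planstate, NULL).

static bool
contains_gather_walker(PlanState *planstate, void *context)
{
	if (planstate == NULL)
		return false;

	/* Found a Gather node somewhere below the starting point. */
	if (IsA(planstate, GatherState))
		return true;

	/* Otherwise keep walking this node's children. */
	return planstate_tree_walker(planstate, contains_gather_walker, context);
}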
View File
@ -94,8 +94,8 @@ copyParamList(ParamListInfo from)
Size Size
EstimateParamListSpace(ParamListInfo paramLI) EstimateParamListSpace(ParamListInfo paramLI)
{ {
int i; int i;
Size sz = sizeof(int); Size sz = sizeof(int);
if (paramLI == NULL || paramLI->numParams <= 0) if (paramLI == NULL || paramLI->numParams <= 0)
return sz; return sz;
@ -119,7 +119,7 @@ EstimateParamListSpace(ParamListInfo paramLI)
typeOid = prm->ptype; typeOid = prm->ptype;
} }
sz = add_size(sz, sizeof(Oid)); /* space for type OID */ sz = add_size(sz, sizeof(Oid)); /* space for type OID */
sz = add_size(sz, sizeof(uint16)); /* space for pflags */ sz = add_size(sz, sizeof(uint16)); /* space for pflags */
/* space for datum/isnull */ /* space for datum/isnull */
@ -132,7 +132,7 @@ EstimateParamListSpace(ParamListInfo paramLI)
typByVal = true; typByVal = true;
} }
sz = add_size(sz, sz = add_size(sz,
datumEstimateSpace(prm->value, prm->isnull, typByVal, typLen)); datumEstimateSpace(prm->value, prm->isnull, typByVal, typLen));
} }
return sz; return sz;
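EstimateParamListSpace(), touched above, is half of an estimate-then-serialize pair used when parameters are shipped to parallel workers. A hedged sketch of that pairing, assuming the backend environment; the helper name is made up, and SerializeParamList() is the real counterpart in the same file.

static char *
serialize_params(ParamListInfo paramLI, Size *size_out)
{
	Size		size = EstimateParamListSpace(paramLI);
	char	   *buf = palloc(size);
	char	   *cursor = buf;

	/* SerializeParamList advances the cursor as it writes. */
	SerializeParamList(paramLI, &cursor);
	Assert((Size) (cursor - buf) <= size);

	*size_out = size;
	return buf;
}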
View File
@ -1836,8 +1836,8 @@ _readCustomScan(void)
READ_BITMAPSET_FIELD(custom_relids); READ_BITMAPSET_FIELD(custom_relids);
/* Lookup CustomScanMethods by CustomName */ /* Lookup CustomScanMethods by CustomName */
token = pg_strtok(&length); /* skip methods: */ token = pg_strtok(&length); /* skip methods: */
token = pg_strtok(&length); /* CustomName */ token = pg_strtok(&length); /* CustomName */
custom_name = nullable_string(token, length); custom_name = nullable_string(token, length);
methods = GetCustomScanMethods(custom_name, false); methods = GetCustomScanMethods(custom_name, false);
local_node->methods = methods; local_node->methods = methods;
@ -2227,11 +2227,12 @@ _readExtensibleNode(void)
{ {
const ExtensibleNodeMethods *methods; const ExtensibleNodeMethods *methods;
ExtensibleNode *local_node; ExtensibleNode *local_node;
const char *extnodename; const char *extnodename;
READ_TEMP_LOCALS(); READ_TEMP_LOCALS();
token = pg_strtok(&length); /* skip: extnodename */ token = pg_strtok(&length); /* skip: extnodename */
token = pg_strtok(&length); /* get extnodename */ token = pg_strtok(&length); /* get extnodename */
extnodename = nullable_string(token, length); extnodename = nullable_string(token, length);
if (!extnodename) if (!extnodename)
View File
@ -163,8 +163,8 @@ make_one_rel(PlannerInfo *root, List *joinlist)
set_base_rel_consider_startup(root); set_base_rel_consider_startup(root);
/* /*
* Generate access paths for the base rels. set_base_rel_sizes also * Generate access paths for the base rels. set_base_rel_sizes also sets
* sets the consider_parallel flag for each baserel, if appropriate. * the consider_parallel flag for each baserel, if appropriate.
*/ */
set_base_rel_sizes(root); set_base_rel_sizes(root);
set_base_rel_pathlists(root); set_base_rel_pathlists(root);
@ -228,7 +228,7 @@ set_base_rel_consider_startup(PlannerInfo *root)
/* /*
* set_base_rel_sizes * set_base_rel_sizes
* Set the size estimates (rows and widths) for each base-relation entry. * Set the size estimates (rows and widths) for each base-relation entry.
* Also determine whether to consider parallel paths for base relations. * Also determine whether to consider parallel paths for base relations.
* *
* We do this in a separate pass over the base rels so that rowcount * We do this in a separate pass over the base rels so that rowcount
* estimates are available for parameterized path generation, and also so * estimates are available for parameterized path generation, and also so
@ -509,6 +509,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
switch (rte->rtekind) switch (rte->rtekind)
{ {
case RTE_RELATION: case RTE_RELATION:
/* /*
* Currently, parallel workers can't access the leader's temporary * Currently, parallel workers can't access the leader's temporary
* tables. We could possibly relax this if the wrote all of its * tables. We could possibly relax this if the wrote all of its
@ -528,7 +529,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
*/ */
if (rte->tablesample != NULL) if (rte->tablesample != NULL)
{ {
Oid proparallel = func_parallel(rte->tablesample->tsmhandler); Oid proparallel = func_parallel(rte->tablesample->tsmhandler);
if (proparallel != PROPARALLEL_SAFE) if (proparallel != PROPARALLEL_SAFE)
return; return;
@ -557,14 +558,15 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
break; break;
case RTE_SUBQUERY: case RTE_SUBQUERY:
/* /*
* Subplans currently aren't passed to workers. Even if they * Subplans currently aren't passed to workers. Even if they
* were, the subplan might be using parallelism internally, and * were, the subplan might be using parallelism internally, and we
* we can't support nested Gather nodes at present. Finally, * can't support nested Gather nodes at present. Finally, we
* we don't have a good way of knowing whether the subplan * don't have a good way of knowing whether the subplan involves
* involves any parallel-restricted operations. It would be * any parallel-restricted operations. It would be nice to relax
* nice to relax this restriction some day, but it's going to * this restriction some day, but it's going to take a fair amount
* take a fair amount of work. * of work.
*/ */
return; return;
@ -580,6 +582,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
break; break;
case RTE_VALUES: case RTE_VALUES:
/* /*
* The data for a VALUES clause is stored in the plan tree itself, * The data for a VALUES clause is stored in the plan tree itself,
* so scanning it in a worker is fine. * so scanning it in a worker is fine.
@ -587,6 +590,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
break; break;
case RTE_CTE: case RTE_CTE:
/* /*
* CTE tuplestores aren't shared among parallel workers, so we * CTE tuplestores aren't shared among parallel workers, so we
* force all CTE scans to happen in the leader. Also, populating * force all CTE scans to happen in the leader. Also, populating
@ -598,8 +602,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
} }
/* /*
* If there's anything in baserestrictinfo that's parallel-restricted, * If there's anything in baserestrictinfo that's parallel-restricted, we
* we give up on parallelizing access to this relation. We could consider * give up on parallelizing access to this relation. We could consider
* instead postponing application of the restricted quals until we're * instead postponing application of the restricted quals until we're
* above all the parallelism in the plan tree, but it's not clear that * above all the parallelism in the plan tree, but it's not clear that
* this would be a win in very many cases, and it might be tricky to make * this would be a win in very many cases, and it might be tricky to make
@ -609,8 +613,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
return; return;
/* /*
* If the relation's outputs are not parallel-safe, we must give up. * If the relation's outputs are not parallel-safe, we must give up. In
* In the common case where the relation only outputs Vars, this check is * the common case where the relation only outputs Vars, this check is
* very cheap; otherwise, we have to do more work. * very cheap; otherwise, we have to do more work.
*/ */
if (rel->reltarget_has_non_vars && if (rel->reltarget_has_non_vars &&
@ -1251,8 +1255,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
int parallel_workers = 0; int parallel_workers = 0;
/* /*
* Decide on the numebr of workers to request for this append path. For * Decide on the numebr of workers to request for this append path.
* now, we just use the maximum value from among the members. It * For now, we just use the maximum value from among the members. It
* might be useful to use a higher number if the Append node were * might be useful to use a higher number if the Append node were
* smart enough to spread out the workers, but it currently isn't. * smart enough to spread out the workers, but it currently isn't.
*/ */
@ -2160,8 +2164,8 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
* Run generate_gather_paths() for each just-processed joinrel. We * Run generate_gather_paths() for each just-processed joinrel. We
* could not do this earlier because both regular and partial paths * could not do this earlier because both regular and partial paths
* can get added to a particular joinrel at multiple times within * can get added to a particular joinrel at multiple times within
* join_search_one_level. After that, we're done creating paths * join_search_one_level. After that, we're done creating paths for
* for the joinrel, so run set_cheapest(). * the joinrel, so run set_cheapest().
*/ */
foreach(lc, root->join_rel_level[lev]) foreach(lc, root->join_rel_level[lev])
{ {
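The reflowed comments above walk through set_rel_consider_parallel()'s reasoning: an RTE kind may disqualify the relation outright, and even when it does not, parallel-restricted quals or output expressions do. A hedged sketch of those last two gates follows; the helper is hypothetical and condenses the checks rather than reproducing the function.

static void
mark_rel_parallel_safe_if_possible(RelOptInfo *rel)
{
	/* Parallel-restricted quals must run in the leader, so give up. */
	if (has_parallel_hazard((Node *) rel->baserestrictinfo, false))
		return;

	/* Likewise for anything parallel-restricted in the output tlist. */
	if (has_parallel_hazard((Node *) rel->reltarget->exprs, false))
		return;

	rel->consider_parallel = true;
}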
View File
@ -1428,15 +1428,14 @@ create_projection_plan(PlannerInfo *root, ProjectionPath *best_path)
* We might not really need a Result node here. There are several ways * We might not really need a Result node here. There are several ways
* that this can happen. For example, MergeAppend doesn't project, so we * that this can happen. For example, MergeAppend doesn't project, so we
* would have thought that we needed a projection to attach resjunk sort * would have thought that we needed a projection to attach resjunk sort
* columns to its output ... but create_merge_append_plan might have * columns to its output ... but create_merge_append_plan might have added
* added those same resjunk sort columns to both MergeAppend and its * those same resjunk sort columns to both MergeAppend and its children.
* children. Alternatively, apply_projection_to_path might have created * Alternatively, apply_projection_to_path might have created a projection
* a projection path as the subpath of a Gather node even though the * path as the subpath of a Gather node even though the subpath was
* subpath was projection-capable. So, if the subpath is capable of * projection-capable. So, if the subpath is capable of projection or the
* projection or the desired tlist is the same expression-wise as the * desired tlist is the same expression-wise as the subplan's, just jam it
* subplan's, just jam it in there. We'll have charged for a Result that * in there. We'll have charged for a Result that doesn't actually appear
* doesn't actually appear in the plan, but that's better than having a * in the plan, but that's better than having a Result we don't need.
* Result we don't need.
*/ */
if (is_projection_capable_path(best_path->subpath) || if (is_projection_capable_path(best_path->subpath) ||
tlist_same_exprs(tlist, subplan->targetlist)) tlist_same_exprs(tlist, subplan->targetlist))
@ -3248,8 +3247,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
/* /*
* If a join between foreign relations was pushed down, remember it. The * If a join between foreign relations was pushed down, remember it. The
* push-down safety of the join depends upon the server and user mapping * push-down safety of the join depends upon the server and user mapping
* being same. That can change between planning and execution time, in which * being same. That can change between planning and execution time, in
* case the plan should be invalidated. * which case the plan should be invalidated.
*/ */
if (scan_relid == 0) if (scan_relid == 0)
root->glob->hasForeignJoin = true; root->glob->hasForeignJoin = true;
@ -3257,8 +3256,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
/* /*
* Replace any outer-relation variables with nestloop params in the qual, * Replace any outer-relation variables with nestloop params in the qual,
* fdw_exprs and fdw_recheck_quals expressions. We do this last so that * fdw_exprs and fdw_recheck_quals expressions. We do this last so that
* the FDW doesn't have to be involved. (Note that parts of fdw_exprs * the FDW doesn't have to be involved. (Note that parts of fdw_exprs or
* or fdw_recheck_quals could have come from join clauses, so doing this * fdw_recheck_quals could have come from join clauses, so doing this
* beforehand on the scan_clauses wouldn't work.) We assume * beforehand on the scan_clauses wouldn't work.) We assume
* fdw_scan_tlist contains no such variables. * fdw_scan_tlist contains no such variables.
*/ */
@ -3279,8 +3278,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
* 0, but there can be no Var with relid 0 in the rel's targetlist or the * 0, but there can be no Var with relid 0 in the rel's targetlist or the
* restriction clauses, so we skip this in that case. Note that any such * restriction clauses, so we skip this in that case. Note that any such
* columns in base relations that were joined are assumed to be contained * columns in base relations that were joined are assumed to be contained
* in fdw_scan_tlist.) This is a bit of a kluge and might go away someday, * in fdw_scan_tlist.) This is a bit of a kluge and might go away
* so we intentionally leave it out of the API presented to FDWs. * someday, so we intentionally leave it out of the API presented to FDWs.
*/ */
scan_plan->fsSystemCol = false; scan_plan->fsSystemCol = false;
if (scan_relid > 0) if (scan_relid > 0)
@ -5899,7 +5898,7 @@ make_gather(List *qptlist,
plan->righttree = NULL; plan->righttree = NULL;
node->num_workers = nworkers; node->num_workers = nworkers;
node->single_copy = single_copy; node->single_copy = single_copy;
node->invisible = false; node->invisible = false;
return node; return node;
} }
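The first hunk in this file reflows the comment explaining when create_projection_plan() can skip the Result node. A hedged, condensed paraphrase of that branch follows; make_result() is file-local to createplan.c, so this is a reading aid for the decision, not a helper callable from elsewhere.

static Plan *
project_or_reuse(List *tlist, Path *subpath, Plan *subplan)
{
	if (is_projection_capable_path(subpath) ||
		tlist_same_exprs(tlist, subplan->targetlist))
	{
		/* The subplan can project (or already computes the right tlist). */
		subplan->targetlist = tlist;
		return subplan;
	}

	/* Otherwise interpose an explicit projection step. */
	return (Plan *) make_result(tlist, NULL, subplan);
}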
View File
@ -108,10 +108,10 @@ static double get_number_of_groups(PlannerInfo *root,
List *rollup_lists, List *rollup_lists,
List *rollup_groupclauses); List *rollup_groupclauses);
static void set_grouped_rel_consider_parallel(PlannerInfo *root, static void set_grouped_rel_consider_parallel(PlannerInfo *root,
RelOptInfo *grouped_rel, RelOptInfo *grouped_rel,
PathTarget *target); PathTarget *target);
static Size estimate_hashagg_tablesize(Path *path, AggClauseCosts *agg_costs, static Size estimate_hashagg_tablesize(Path *path, AggClauseCosts *agg_costs,
double dNumGroups); double dNumGroups);
static RelOptInfo *create_grouping_paths(PlannerInfo *root, static RelOptInfo *create_grouping_paths(PlannerInfo *root,
RelOptInfo *input_rel, RelOptInfo *input_rel,
PathTarget *target, PathTarget *target,
@ -141,7 +141,7 @@ static RelOptInfo *create_ordered_paths(PlannerInfo *root,
static PathTarget *make_group_input_target(PlannerInfo *root, static PathTarget *make_group_input_target(PlannerInfo *root,
PathTarget *final_target); PathTarget *final_target);
static PathTarget *make_partialgroup_input_target(PlannerInfo *root, static PathTarget *make_partialgroup_input_target(PlannerInfo *root,
PathTarget *final_target); PathTarget *final_target);
static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist); static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists); static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
static PathTarget *make_window_input_target(PlannerInfo *root, static PathTarget *make_window_input_target(PlannerInfo *root,
@ -1777,8 +1777,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
* findable from the PlannerInfo struct; anything else the FDW wants * findable from the PlannerInfo struct; anything else the FDW wants
* to know should be obtainable via "root". * to know should be obtainable via "root".
* *
* Note: CustomScan providers, as well as FDWs that don't want to * Note: CustomScan providers, as well as FDWs that don't want to use
* use this hook, can use the create_upper_paths_hook; see below. * this hook, can use the create_upper_paths_hook; see below.
*/ */
if (current_rel->fdwroutine && if (current_rel->fdwroutine &&
current_rel->fdwroutine->GetForeignUpperPaths) current_rel->fdwroutine->GetForeignUpperPaths)
@ -3196,8 +3196,8 @@ set_grouped_rel_consider_parallel(PlannerInfo *root, RelOptInfo *grouped_rel,
/* /*
* All that's left to check now is to make sure all aggregate functions * All that's left to check now is to make sure all aggregate functions
* support partial mode. If there's no aggregates then we can skip checking * support partial mode. If there's no aggregates then we can skip
* that. * checking that.
*/ */
if (!parse->hasAggs) if (!parse->hasAggs)
grouped_rel->consider_parallel = true; grouped_rel->consider_parallel = true;
@ -3370,9 +3370,10 @@ create_grouping_paths(PlannerInfo *root,
/* /*
* Determine whether it's possible to perform sort-based implementations * Determine whether it's possible to perform sort-based implementations
* of grouping. (Note that if groupClause is empty, grouping_is_sortable() * of grouping. (Note that if groupClause is empty,
* is trivially true, and all the pathkeys_contained_in() tests will * grouping_is_sortable() is trivially true, and all the
* succeed too, so that we'll consider every surviving input path.) * pathkeys_contained_in() tests will succeed too, so that we'll consider
* every surviving input path.)
*/ */
can_sort = grouping_is_sortable(parse->groupClause); can_sort = grouping_is_sortable(parse->groupClause);
@ -3408,7 +3409,7 @@ create_grouping_paths(PlannerInfo *root,
*/ */
if (grouped_rel->consider_parallel) if (grouped_rel->consider_parallel)
{ {
Path *cheapest_partial_path = linitial(input_rel->partial_pathlist); Path *cheapest_partial_path = linitial(input_rel->partial_pathlist);
/* /*
* Build target list for partial aggregate paths. We cannot reuse the * Build target list for partial aggregate paths. We cannot reuse the
@ -3471,27 +3472,27 @@ create_grouping_paths(PlannerInfo *root,
if (parse->hasAggs) if (parse->hasAggs)
add_partial_path(grouped_rel, (Path *) add_partial_path(grouped_rel, (Path *)
create_agg_path(root, create_agg_path(root,
grouped_rel, grouped_rel,
path, path,
partial_grouping_target, partial_grouping_target,
parse->groupClause ? AGG_SORTED : AGG_PLAIN, parse->groupClause ? AGG_SORTED : AGG_PLAIN,
parse->groupClause, parse->groupClause,
NIL, NIL,
&agg_partial_costs, &agg_partial_costs,
dNumPartialGroups, dNumPartialGroups,
false, false,
false, false,
true)); true));
else else
add_partial_path(grouped_rel, (Path *) add_partial_path(grouped_rel, (Path *)
create_group_path(root, create_group_path(root,
grouped_rel, grouped_rel,
path, path,
partial_grouping_target, partial_grouping_target,
parse->groupClause, parse->groupClause,
NIL, NIL,
dNumPartialGroups)); dNumPartialGroups));
} }
} }
} }
@ -3513,18 +3514,18 @@ create_grouping_paths(PlannerInfo *root,
if (hashaggtablesize < work_mem * 1024L) if (hashaggtablesize < work_mem * 1024L)
{ {
add_partial_path(grouped_rel, (Path *) add_partial_path(grouped_rel, (Path *)
create_agg_path(root, create_agg_path(root,
grouped_rel, grouped_rel,
cheapest_partial_path, cheapest_partial_path,
partial_grouping_target, partial_grouping_target,
AGG_HASHED, AGG_HASHED,
parse->groupClause, parse->groupClause,
NIL, NIL,
&agg_partial_costs, &agg_partial_costs,
dNumPartialGroups, dNumPartialGroups,
false, false,
false, false,
true)); true));
} }
} }
} }
@ -3616,13 +3617,13 @@ create_grouping_paths(PlannerInfo *root,
/* /*
* Now generate a complete GroupAgg Path atop of the cheapest partial * Now generate a complete GroupAgg Path atop of the cheapest partial
* path. We need only bother with the cheapest path here, as the output * path. We need only bother with the cheapest path here, as the
* of Gather is never sorted. * output of Gather is never sorted.
*/ */
if (grouped_rel->partial_pathlist) if (grouped_rel->partial_pathlist)
{ {
Path *path = (Path *) linitial(grouped_rel->partial_pathlist); Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
double total_groups = path->rows * path->parallel_workers; double total_groups = path->rows * path->parallel_workers;
path = (Path *) create_gather_path(root, path = (Path *) create_gather_path(root,
grouped_rel, grouped_rel,
@ -3632,9 +3633,9 @@ create_grouping_paths(PlannerInfo *root,
&total_groups); &total_groups);
/* /*
* Gather is always unsorted, so we'll need to sort, unless there's * Gather is always unsorted, so we'll need to sort, unless
* no GROUP BY clause, in which case there will only be a single * there's no GROUP BY clause, in which case there will only be a
* group. * single group.
*/ */
if (parse->groupClause) if (parse->groupClause)
path = (Path *) create_sort_path(root, path = (Path *) create_sort_path(root,
@ -3645,27 +3646,27 @@ create_grouping_paths(PlannerInfo *root,
if (parse->hasAggs) if (parse->hasAggs)
add_path(grouped_rel, (Path *) add_path(grouped_rel, (Path *)
create_agg_path(root, create_agg_path(root,
grouped_rel, grouped_rel,
path, path,
target, target,
parse->groupClause ? AGG_SORTED : AGG_PLAIN, parse->groupClause ? AGG_SORTED : AGG_PLAIN,
parse->groupClause, parse->groupClause,
(List *) parse->havingQual, (List *) parse->havingQual,
&agg_final_costs, &agg_final_costs,
dNumGroups, dNumGroups,
true, true,
true, true,
true)); true));
else else
add_path(grouped_rel, (Path *) add_path(grouped_rel, (Path *)
create_group_path(root, create_group_path(root,
grouped_rel, grouped_rel,
path, path,
target, target,
parse->groupClause, parse->groupClause,
(List *) parse->havingQual, (List *) parse->havingQual,
dNumGroups)); dNumGroups));
} }
} }
@ -3678,15 +3679,15 @@ create_grouping_paths(PlannerInfo *root,
/* /*
* Provided that the estimated size of the hashtable does not exceed * Provided that the estimated size of the hashtable does not exceed
* work_mem, we'll generate a HashAgg Path, although if we were unable * work_mem, we'll generate a HashAgg Path, although if we were unable
* to sort above, then we'd better generate a Path, so that we at least * to sort above, then we'd better generate a Path, so that we at
* have one. * least have one.
*/ */
if (hashaggtablesize < work_mem * 1024L || if (hashaggtablesize < work_mem * 1024L ||
grouped_rel->pathlist == NIL) grouped_rel->pathlist == NIL)
{ {
/* /*
* We just need an Agg over the cheapest-total input path, since input * We just need an Agg over the cheapest-total input path, since
* order won't matter. * input order won't matter.
*/ */
add_path(grouped_rel, (Path *) add_path(grouped_rel, (Path *)
create_agg_path(root, grouped_rel, create_agg_path(root, grouped_rel,
@ -3704,12 +3705,12 @@ create_grouping_paths(PlannerInfo *root,
/* /*
* Generate a HashAgg Path atop of the cheapest partial path. Once * Generate a HashAgg Path atop of the cheapest partial path. Once
* again, we'll only do this if it looks as though the hash table won't * again, we'll only do this if it looks as though the hash table
* exceed work_mem. * won't exceed work_mem.
*/ */
if (grouped_rel->partial_pathlist) if (grouped_rel->partial_pathlist)
{ {
Path *path = (Path *) linitial(grouped_rel->partial_pathlist); Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
hashaggtablesize = estimate_hashagg_tablesize(path, hashaggtablesize = estimate_hashagg_tablesize(path,
&agg_final_costs, &agg_final_costs,
@ -3717,7 +3718,7 @@ create_grouping_paths(PlannerInfo *root,
if (hashaggtablesize < work_mem * 1024L) if (hashaggtablesize < work_mem * 1024L)
{ {
double total_groups = path->rows * path->parallel_workers; double total_groups = path->rows * path->parallel_workers;
path = (Path *) create_gather_path(root, path = (Path *) create_gather_path(root,
grouped_rel, grouped_rel,
@ -3727,18 +3728,18 @@ create_grouping_paths(PlannerInfo *root,
&total_groups); &total_groups);
add_path(grouped_rel, (Path *) add_path(grouped_rel, (Path *)
create_agg_path(root, create_agg_path(root,
grouped_rel, grouped_rel,
path, path,
target, target,
AGG_HASHED, AGG_HASHED,
parse->groupClause, parse->groupClause,
(List *) parse->havingQual, (List *) parse->havingQual,
&agg_final_costs, &agg_final_costs,
dNumGroups, dNumGroups,
true, true,
true, true,
true)); true));
} }
} }
} }
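Several hunks above reflow comments around the same guard: before adding any HashAgg path, the planner estimates the hash table and compares it to work_mem. A hedged sketch of that guard, pulled out into a hypothetical helper; estimate_hashagg_tablesize() is file-local to planner.c, and work_mem is the usual GUC measured in kilobytes (hence the * 1024L).

static bool
hashagg_table_fits(Path *path, AggClauseCosts *agg_costs, double dNumGroups)
{
	Size		hashaggtablesize = estimate_hashagg_tablesize(path,
															  agg_costs,
															  dNumGroups);

	/* The HashAgg path is considered only if the table is expected to fit. */
	return hashaggtablesize < work_mem * 1024L;
}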
View File
@ -2100,6 +2100,7 @@ search_indexed_tlist_for_partial_aggref(Aggref *aggref, indexed_tlist *itlist,
continue; continue;
if (aggref->aggvariadic != tlistaggref->aggvariadic) if (aggref->aggvariadic != tlistaggref->aggvariadic)
continue; continue;
/* /*
* it would be harmless to compare aggcombine and aggpartial, but * it would be harmless to compare aggcombine and aggpartial, but
* it's also unnecessary * it's also unnecessary
View File
@ -101,7 +101,7 @@ typedef struct
} has_parallel_hazard_arg; } has_parallel_hazard_arg;
static bool aggregates_allow_partial_walker(Node *node, static bool aggregates_allow_partial_walker(Node *node,
partial_agg_context *context); partial_agg_context *context);
static bool contain_agg_clause_walker(Node *node, void *context); static bool contain_agg_clause_walker(Node *node, void *context);
static bool count_agg_clauses_walker(Node *node, static bool count_agg_clauses_walker(Node *node,
count_agg_clauses_context *context); count_agg_clauses_context *context);
@ -112,9 +112,9 @@ static bool contain_mutable_functions_walker(Node *node, void *context);
static bool contain_volatile_functions_walker(Node *node, void *context); static bool contain_volatile_functions_walker(Node *node, void *context);
static bool contain_volatile_functions_not_nextval_walker(Node *node, void *context); static bool contain_volatile_functions_not_nextval_walker(Node *node, void *context);
static bool has_parallel_hazard_walker(Node *node, static bool has_parallel_hazard_walker(Node *node,
has_parallel_hazard_arg *context); has_parallel_hazard_arg *context);
static bool parallel_too_dangerous(char proparallel, static bool parallel_too_dangerous(char proparallel,
has_parallel_hazard_arg *context); has_parallel_hazard_arg *context);
static bool typeid_is_temp(Oid typeid); static bool typeid_is_temp(Oid typeid);
static bool contain_nonstrict_functions_walker(Node *node, void *context); static bool contain_nonstrict_functions_walker(Node *node, void *context);
static bool contain_leaked_vars_walker(Node *node, void *context); static bool contain_leaked_vars_walker(Node *node, void *context);
@ -446,7 +446,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context)
if (aggref->aggdistinct || aggref->aggorder) if (aggref->aggdistinct || aggref->aggorder)
{ {
context->allowedtype = PAT_DISABLED; context->allowedtype = PAT_DISABLED;
return true; /* abort search */ return true; /* abort search */
} }
aggTuple = SearchSysCache1(AGGFNOID, aggTuple = SearchSysCache1(AGGFNOID,
ObjectIdGetDatum(aggref->aggfnoid)); ObjectIdGetDatum(aggref->aggfnoid));
@ -463,7 +463,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context)
{ {
ReleaseSysCache(aggTuple); ReleaseSysCache(aggTuple);
context->allowedtype = PAT_DISABLED; context->allowedtype = PAT_DISABLED;
return true; /* abort search */ return true; /* abort search */
} }
/* /*
@ -479,7 +479,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context)
context->allowedtype = PAT_INTERNAL_ONLY; context->allowedtype = PAT_INTERNAL_ONLY;
ReleaseSysCache(aggTuple); ReleaseSysCache(aggTuple);
return false; /* continue searching */ return false; /* continue searching */
} }
return expression_tree_walker(node, aggregates_allow_partial_walker, return expression_tree_walker(node, aggregates_allow_partial_walker,
(void *) context); (void *) context);
@ -1354,7 +1354,7 @@ contain_volatile_functions_not_nextval_walker(Node *node, void *context)
bool bool
has_parallel_hazard(Node *node, bool allow_restricted) has_parallel_hazard(Node *node, bool allow_restricted)
{ {
has_parallel_hazard_arg context; has_parallel_hazard_arg context;
context.allow_restricted = allow_restricted; context.allow_restricted = allow_restricted;
return has_parallel_hazard_walker(node, &context); return has_parallel_hazard_walker(node, &context);
@ -1371,16 +1371,16 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
* recurse through Query objects to as to locate parallel-unsafe * recurse through Query objects to as to locate parallel-unsafe
* constructs anywhere in the tree. * constructs anywhere in the tree.
* *
* Later, we'll be called again for specific quals, possibly after * Later, we'll be called again for specific quals, possibly after some
* some planning has been done, we may encounter SubPlan, SubLink, * planning has been done, we may encounter SubPlan, SubLink, or
* or AlternativeSubLink nodes. Currently, there's no need to recurse * AlternativeSubLink nodes. Currently, there's no need to recurse
* through these; they can't be unsafe, since we've already cleared * through these; they can't be unsafe, since we've already cleared the
* the entire query of unsafe operations, and they're definitely * entire query of unsafe operations, and they're definitely
* parallel-restricted. * parallel-restricted.
*/ */
if (IsA(node, Query)) if (IsA(node, Query))
{ {
Query *query = (Query *) node; Query *query = (Query *) node;
if (query->rowMarks != NULL) if (query->rowMarks != NULL)
return true; return true;
@ -1390,12 +1390,12 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
has_parallel_hazard_walker, has_parallel_hazard_walker,
context, 0); context, 0);
} }
else if (IsA(node, SubPlan) || IsA(node, SubLink) || else if (IsA(node, SubPlan) ||IsA(node, SubLink) ||
IsA(node, AlternativeSubPlan) || IsA(node, Param)) IsA(node, AlternativeSubPlan) ||IsA(node, Param))
{ {
/* /*
* Since we don't have the ability to push subplans down to workers * Since we don't have the ability to push subplans down to workers at
* at present, we treat subplan references as parallel-restricted. * present, we treat subplan references as parallel-restricted.
*/ */
if (!context->allow_restricted) if (!context->allow_restricted)
return true; return true;
@ -1405,12 +1405,14 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
if (IsA(node, RestrictInfo)) if (IsA(node, RestrictInfo))
{ {
RestrictInfo *rinfo = (RestrictInfo *) node; RestrictInfo *rinfo = (RestrictInfo *) node;
return has_parallel_hazard_walker((Node *) rinfo->clause, context); return has_parallel_hazard_walker((Node *) rinfo->clause, context);
} }
/* /*
* It is an error for a parallel worker to touch a temporary table in any * It is an error for a parallel worker to touch a temporary table in any
* way, so we can't handle nodes whose type is the rowtype of such a table. * way, so we can't handle nodes whose type is the rowtype of such a
* table.
*/ */
if (!context->allow_restricted) if (!context->allow_restricted)
{ {
@ -1534,7 +1536,8 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
foreach(opid, rcexpr->opnos) foreach(opid, rcexpr->opnos)
{ {
Oid opfuncid = get_opcode(lfirst_oid(opid)); Oid opfuncid = get_opcode(lfirst_oid(opid));
if (parallel_too_dangerous(func_parallel(opfuncid), context)) if (parallel_too_dangerous(func_parallel(opfuncid), context))
return true; return true;
} }
@ -1558,7 +1561,7 @@ parallel_too_dangerous(char proparallel, has_parallel_hazard_arg *context)
static bool static bool
typeid_is_temp(Oid typeid) typeid_is_temp(Oid typeid)
{ {
Oid relid = get_typ_typrelid(typeid); Oid relid = get_typ_typrelid(typeid);
if (!OidIsValid(relid)) if (!OidIsValid(relid))
return false; return false;
@ -1870,8 +1873,8 @@ contain_leaked_vars_walker(Node *node, void *context)
/* /*
* WHERE CURRENT OF doesn't contain function calls. Moreover, it * WHERE CURRENT OF doesn't contain function calls. Moreover, it
* is important that this can be pushed down into a * is important that this can be pushed down into a
* security_barrier view, since the planner must always generate * security_barrier view, since the planner must always generate a
* a TID scan when CURRENT OF is present -- c.f. cost_tidscan. * TID scan when CURRENT OF is present -- c.f. cost_tidscan.
*/ */
return false; return false;
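The reflowed comments above describe the two modes of has_parallel_hazard_walker(). A hedged sketch of how callers typically use the entry point; both wrappers are hypothetical names around call patterns visible elsewhere in the planner.

static bool
query_allows_parallel_mode(Query *parse)
{
	/*
	 * allow_restricted = true: only outright parallel-unsafe constructs
	 * disqualify the whole query; restricted ones merely keep parts of
	 * the plan in the leader.
	 */
	return !has_parallel_hazard((Node *) parse, true);
}

static bool
rel_quals_are_parallel_safe(RelOptInfo *rel)
{
	/*
	 * allow_restricted = false: parallel-restricted constructs (SubPlans,
	 * Params, temp table rowtypes, ...) also count as hazards, because
	 * these quals would be executed inside the workers.
	 */
	return !has_parallel_hazard((Node *) rel->baserestrictinfo, false);
}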
View File
@ -709,7 +709,7 @@ infer_collation_opclass_match(InferenceElem *elem, Relation idxRel,
AttrNumber natt; AttrNumber natt;
Oid inferopfamily = InvalidOid; /* OID of opclass opfamily */ Oid inferopfamily = InvalidOid; /* OID of opclass opfamily */
Oid inferopcinputtype = InvalidOid; /* OID of opclass input type */ Oid inferopcinputtype = InvalidOid; /* OID of opclass input type */
int nplain = 0; /* # plain attrs observed */ int nplain = 0; /* # plain attrs observed */
/* /*
* If inference specification element lacks collation/opclass, then no * If inference specification element lacks collation/opclass, then no
View File
@ -107,7 +107,7 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptKind reloptkind)
rel->consider_startup = (root->tuple_fraction > 0); rel->consider_startup = (root->tuple_fraction > 0);
rel->consider_param_startup = false; /* might get changed later */ rel->consider_param_startup = false; /* might get changed later */
rel->consider_parallel = false; /* might get changed later */ rel->consider_parallel = false; /* might get changed later */
rel->rel_parallel_workers = -1; /* set up in GetRelationInfo */ rel->rel_parallel_workers = -1; /* set up in GetRelationInfo */
rel->reltarget = create_empty_pathtarget(); rel->reltarget = create_empty_pathtarget();
rel->reltarget_has_non_vars = false; rel->reltarget_has_non_vars = false;
rel->pathlist = NIL; rel->pathlist = NIL;
View File
@ -776,11 +776,11 @@ apply_pathtarget_labeling_to_tlist(List *tlist, PathTarget *target)
void void
apply_partialaggref_adjustment(PathTarget *target) apply_partialaggref_adjustment(PathTarget *target)
{ {
ListCell *lc; ListCell *lc;
foreach(lc, target->exprs) foreach(lc, target->exprs)
{ {
Aggref *aggref = (Aggref *) lfirst(lc); Aggref *aggref = (Aggref *) lfirst(lc);
if (IsA(aggref, Aggref)) if (IsA(aggref, Aggref))
{ {
View File
@ -3083,8 +3083,8 @@ errorMissingColumn(ParseState *pstate,
errmsg("column %s.%s does not exist", relname, colname) : errmsg("column %s.%s does not exist", relname, colname) :
errmsg("column \"%s\" does not exist", colname), errmsg("column \"%s\" does not exist", colname),
state->rfirst ? closestfirst ? state->rfirst ? closestfirst ?
errhint("Perhaps you meant to reference the column \"%s.%s\".", errhint("Perhaps you meant to reference the column \"%s.%s\".",
state->rfirst->eref->aliasname, closestfirst) : state->rfirst->eref->aliasname, closestfirst) :
errhint("There is a column named \"%s\" in table \"%s\", but it cannot be referenced from this part of the query.", errhint("There is a column named \"%s\" in table \"%s\", but it cannot be referenced from this part of the query.",
colname, state->rfirst->eref->aliasname) : 0, colname, state->rfirst->eref->aliasname) : 0,
parser_errposition(pstate, location))); parser_errposition(pstate, location)));
Some files were not shown because too many files have changed in this diff.