Post-PG 10 beta1 pgindent run

perltidy run not included.

commit a6fd7b7a5f
parent 8a94332478
@@ -165,11 +165,11 @@ blbuildempty(Relation index)
 	BloomFillMetapage(index, metapage);
 
 	/*
-	 * Write the page and log it. It might seem that an immediate sync
-	 * would be sufficient to guarantee that the file exists on disk, but
-	 * recovery itself might remove it while replaying, for example, an
-	 * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record. Therefore, we
-	 * need this even when wal_level=minimal.
+	 * Write the page and log it. It might seem that an immediate sync would
+	 * be sufficient to guarantee that the file exists on disk, but recovery
+	 * itself might remove it while replaying, for example, an
+	 * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record. Therefore, we need
+	 * this even when wal_level=minimal.
 	 */
 	PageSetChecksumInplace(metapage, BLOOM_METAPAGE_BLKNO);
 	smgrwrite(index->rd_smgr, INIT_FORKNUM, BLOOM_METAPAGE_BLKNO,
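
The comment rewrapped above documents a subtle durability rule for init forks. For orientation, a minimal sketch of the full write-and-log sequence it belongs to, reconstructed from the context lines rather than quoted from the file, so treat the exact arguments as illustrative:

    PageSetChecksumInplace(metapage, BLOOM_METAPAGE_BLKNO);
    smgrwrite(index->rd_smgr, INIT_FORKNUM, BLOOM_METAPAGE_BLKNO,
              (char *) metapage, true);
    log_newpage(&index->rd_smgr->smgr_rnode.node, INIT_FORKNUM,
                BLOOM_METAPAGE_BLKNO, metapage, true);
    /* WAL alone is not enough: replaying XLOG_DBASE_CREATE or
     * XLOG_TBLSPC_CREATE can wipe and recreate the directory, so the
     * init fork must also be synced immediately. */
    smgrimmedsync(index->rd_smgr, INIT_FORKNUM);

The same pattern recurs in the btbuildempty and spgbuildempty hunks further down.
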
@@ -75,7 +75,7 @@ _PG_init(void)
 		bl_relopt_tab[i + 1].optname = MemoryContextStrdup(TopMemoryContext,
 														   buf);
 		bl_relopt_tab[i + 1].opttype = RELOPT_TYPE_INT;
-		bl_relopt_tab[i + 1].offset = offsetof(BloomOptions, bitSize[0]) + sizeof(int) * i;
+		bl_relopt_tab[i + 1].offset = offsetof(BloomOptions, bitSize[0]) +sizeof(int) * i;
 	}
 }
 
@@ -152,16 +152,19 @@ xpstrdup(const char *in)
 	return pstrdup(in);
 }
 
-static void pg_attribute_noreturn()
+static void
+pg_attribute_noreturn()
 dblink_res_internalerror(PGconn *conn, PGresult *res, const char *p2)
 {
 	char	   *msg = pchomp(PQerrorMessage(conn));
 
 	if (res)
 		PQclear(res);
 	elog(ERROR, "%s: %s", p2, msg);
 }
 
-static void pg_attribute_noreturn()
+static void
+pg_attribute_noreturn()
 dblink_conn_not_avail(const char *conname)
 {
 	if (conname)
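
pgindent wants the function name to start a line of its own in a definition, which is why `pg_attribute_noreturn()` is pushed onto a separate line between the return type and the name. A hedged sketch of the resulting layout, using a hypothetical helper rather than the dblink functions themselves:

    static void
    pg_attribute_noreturn()
    fatal_helper(const char *msg)
    {
        elog(ERROR, "%s", msg);     /* elog(ERROR) never returns */
    }
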
@@ -176,7 +179,7 @@ dblink_conn_not_avail(const char *conname)
 
 static void
 dblink_get_conn(char *conname_or_str,
-				PGconn * volatile *conn_p, char **conname_p, volatile bool *freeconn_p)
+				PGconn *volatile * conn_p, char **conname_p, volatile bool *freeconn_p)
 {
 	remoteConn *rconn = getConnectionByName(conname_or_str);
 	PGconn	   *conn;
@@ -201,6 +204,7 @@ dblink_get_conn(char *conname_or_str,
 		if (PQstatus(conn) == CONNECTION_BAD)
 		{
 			char	   *msg = pchomp(PQerrorMessage(conn));
+
 			PQfinish(conn);
 			ereport(ERROR,
 					(errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
@@ -223,6 +227,7 @@ static PGconn *
 dblink_get_named_conn(const char *conname)
 {
 	remoteConn *rconn = getConnectionByName(conname);
+
 	if (rconn)
 		return rconn->conn;
 
@@ -2699,9 +2704,9 @@ dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
 	message_context = xpstrdup(pg_diag_context);
 
 	/*
-	 * If we don't get a message from the PGresult, try the PGconn. This
-	 * is needed because for connection-level failures, PQexec may just
-	 * return NULL, not a PGresult at all.
+	 * If we don't get a message from the PGresult, try the PGconn. This is
+	 * needed because for connection-level failures, PQexec may just return
+	 * NULL, not a PGresult at all.
 	 */
 	if (message_primary == NULL)
 		message_primary = pchomp(PQerrorMessage(conn));
@@ -143,7 +143,7 @@ verify_hash_page(bytea *raw_page, int flags)
  * -------------------------------------------------
  */
 static void
-GetHashPageStatistics(Page page, HashPageStat * stat)
+GetHashPageStatistics(Page page, HashPageStat *stat)
 {
 	OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
 	HashPageOpaque opaque = (HashPageOpaque) PageGetSpecialPointer(page);
@@ -315,5 +315,5 @@ page_checksum(PG_FUNCTION_ARGS)
 
 	page = (PageHeader) VARDATA(raw_page);
 
-	PG_RETURN_INT16(pg_checksum_page((char *)page, blkno));
+	PG_RETURN_INT16(pg_checksum_page((char *) page, blkno));
 }
@@ -238,7 +238,7 @@ px_find_digest(const char *name, PX_MD **res)
  * prototype for the EVP functions that return an algorithm, e.g.
  * EVP_aes_128_cbc().
  */
-typedef const EVP_CIPHER *(*ossl_EVP_cipher_func)(void);
+typedef const EVP_CIPHER *(*ossl_EVP_cipher_func) (void);
 
 /*
  * ossl_cipher contains the static information about each cipher.
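
The typedef being reindented here matches the shape of OpenSSL's cipher constructors. A small usage sketch — EVP_aes_128_cbc() really does have this prototype in OpenSSL, but the wrapper function is invented for illustration:

    #include <openssl/evp.h>

    typedef const EVP_CIPHER *(*ossl_EVP_cipher_func) (void);

    static const EVP_CIPHER *
    resolve_cipher(ossl_EVP_cipher_func func)
    {
        return func();          /* e.g. resolve_cipher(EVP_aes_128_cbc) */
    }

Storing the function pointer rather than the EVP_CIPHER itself lets the cipher be resolved lazily, which is what the NULL placeholders in the ossl_aes_* tables below rely on.
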
@@ -706,13 +706,15 @@ static const struct ossl_cipher ossl_cast_cbc = {
 
 static const struct ossl_cipher ossl_aes_ecb = {
 	ossl_aes_ecb_init,
-	NULL,						/* EVP_aes_XXX_ecb(), determined in init function */
+	NULL,						/* EVP_aes_XXX_ecb(), determined in init
+								 * function */
 	128 / 8, 256 / 8
 };
 
 static const struct ossl_cipher ossl_aes_cbc = {
 	ossl_aes_cbc_init,
-	NULL,						/* EVP_aes_XXX_cbc(), determined in init function */
+	NULL,						/* EVP_aes_XXX_cbc(), determined in init
+								 * function */
 	128 / 8, 256 / 8
 };
 
@@ -454,8 +454,8 @@ pg_random_uuid(PG_FUNCTION_ARGS)
 	uint8	   *buf = (uint8 *) palloc(UUID_LEN);
 
 	/*
-	 * Generate random bits. pg_backend_random() will do here, we don't
-	 * promis UUIDs to be cryptographically random, when built with
+	 * Generate random bits. pg_backend_random() will do here, we don't promis
+	 * UUIDs to be cryptographically random, when built with
 	 * --disable-strong-random.
 	 */
 	if (!pg_backend_random((char *) buf, UUID_LEN))
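
(The "promis" typo is in the upstream comment itself and survives the re-wrap.) For context, the bytes that turn the random buffer into a well-formed version-4 UUID are stamped right after this; the bit-twiddling follows RFC 4122 (a sketch, not a quotation of the file):

    /* set version to 4 (pseudorandom) and variant to 10xx */
    buf[6] = (buf[6] & 0x0f) | 0x40;
    buf[8] = (buf[8] & 0x3f) | 0x80;
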
@@ -99,7 +99,10 @@ pgrowlocks(PG_FUNCTION_ARGS)
 	relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
 	rel = heap_openrv(relrv, AccessShareLock);
 
-	/* check permissions: must have SELECT on table or be in pg_stat_scan_tables */
+	/*
+	 * check permissions: must have SELECT on table or be in
+	 * pg_stat_scan_tables
+	 */
 	aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
 								  ACL_SELECT);
 	if (aclresult != ACLCHECK_OK)
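
The check this comment describes continues past the visible context. A hedged sketch of the usual contrib-module pattern of that era (the role constant and error call are what comparable PG 10 modules use; verify against the file before relying on the details):

    aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
                                  ACL_SELECT);
    if (aclresult != ACLCHECK_OK)
        aclresult = is_member_of_role(GetUserId(),
                                      DEFAULT_ROLE_STAT_SCAN_TABLES) ?
            ACLCHECK_OK : ACLCHECK_NO_PRIV;
    if (aclresult != ACLCHECK_OK)
        aclcheck_error(aclresult, ACL_KIND_CLASS,
                       RelationGetRelationName(rel));
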
@@ -1017,8 +1017,8 @@ deparseSelectSql(List *tlist, bool is_subquery, List **retrieved_attrs,
 	{
 		/*
 		 * For a relation that is deparsed as a subquery, emit expressions
-		 * specified in the relation's reltarget. Note that since this is
-		 * for the subquery, no need to care about *retrieved_attrs.
+		 * specified in the relation's reltarget. Note that since this is for
+		 * the subquery, no need to care about *retrieved_attrs.
 		 */
 		deparseSubqueryTargetList(context);
 	}
@@ -2189,8 +2189,8 @@ deparseVar(Var *node, deparse_expr_cxt *context)
 
 	/*
 	 * If the Var belongs to the foreign relation that is deparsed as a
-	 * subquery, use the relation and column alias to the Var provided
-	 * by the subquery, instead of the remote name.
+	 * subquery, use the relation and column alias to the Var provided by the
+	 * subquery, instead of the remote name.
 	 */
 	if (is_subquery_var(node, context->scanrel, &relno, &colno))
 	{
@@ -4170,8 +4170,8 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
 	fpinfo->jointype = jointype;
 
 	/*
-	 * By default, both the input relations are not required to be deparsed
-	 * as subqueries, but there might be some relations covered by the input
+	 * By default, both the input relations are not required to be deparsed as
+	 * subqueries, but there might be some relations covered by the input
 	 * relations that are required to be deparsed as subqueries, so save the
 	 * relids of those relations for later use by the deparser.
 	 */
@@ -4227,8 +4227,8 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
 		case JOIN_FULL:
 
 			/*
-			 * In this case, if any of the input relations has conditions,
-			 * we need to deparse that relation as a subquery so that the
+			 * In this case, if any of the input relations has conditions, we
+			 * need to deparse that relation as a subquery so that the
 			 * conditions can be evaluated before the join. Remember it in
 			 * the fpinfo of this relation so that the deparser can take
 			 * appropriate action. Also, save the relids of base relations
@@ -4869,7 +4869,7 @@ add_foreign_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
 	fpinfo->table = ifpinfo->table;
 	fpinfo->server = ifpinfo->server;
 	fpinfo->user = ifpinfo->user;
-	merge_fdw_options(fpinfo, ifpinfo , NULL);
+	merge_fdw_options(fpinfo, ifpinfo, NULL);
 
 	/* Assess if it is safe to push down aggregation and grouping. */
 	if (!foreign_grouping_ok(root, grouped_rel))
@@ -977,7 +977,8 @@ brin_desummarize_range(PG_FUNCTION_ARGS)
 						RelationGetRelationName(indexRel))));
 
 	/* the revmap does the hard work */
-	do {
+	do
+	{
 		done = brinRevmapDesummarizeRange(indexRel, heapBlk);
 	}
 	while (!done);
@@ -140,9 +140,9 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
 	 * exclusive cleanup lock. This guarantees that no insertions currently
 	 * happen in this subtree. Caller also acquire Exclusive lock on deletable
 	 * page and is acquiring and releasing exclusive lock on left page before.
-	 * Left page was locked and released. Then parent and this page are locked.
-	 * We acquire left page lock here only to mark page dirty after changing
-	 * right pointer.
+	 * Left page was locked and released. Then parent and this page are
+	 * locked. We acquire left page lock here only to mark page dirty after
+	 * changing right pointer.
 	 */
 	lBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, leftBlkno,
 								 RBM_NORMAL, gvs->strategy);
@@ -258,7 +258,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
 	buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
 								RBM_NORMAL, gvs->strategy);
 
-	if(!isRoot)
+	if (!isRoot)
 		LockBuffer(buffer, GIN_EXCLUSIVE);
 
 	page = BufferGetPage(buffer);
@@ -295,7 +295,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
 		}
 	}
 
-	if(!isRoot)
+	if (!isRoot)
 		LockBuffer(buffer, GIN_UNLOCK);
 
 	ReleaseBuffer(buffer);
@@ -326,7 +326,7 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
 								RBM_NORMAL, gvs->strategy);
 	page = BufferGetPage(buffer);
 
-	ginTraverseLock(buffer,false);
+	ginTraverseLock(buffer, false);
 
 	Assert(GinPageIsData(page));
 
@@ -351,11 +351,11 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
 		bool		hasEmptyChild = FALSE;
 		bool		hasNonEmptyChild = FALSE;
 		OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
-		BlockNumber* children = palloc(sizeof(BlockNumber) * (maxoff + 1));
+		BlockNumber *children = palloc(sizeof(BlockNumber) * (maxoff + 1));
 
 		/*
-		 * Read all children BlockNumbers.
-		 * Not sure it is safe if there are many concurrent vacuums.
+		 * Read all children BlockNumbers. Not sure it is safe if there are
+		 * many concurrent vacuums.
 		 */
 
 		for (i = FirstOffsetNumber; i <= maxoff; i++)
@@ -380,14 +380,14 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
 	vacuum_delay_point();
 
 	/*
-	 * All subtree is empty - just return TRUE to indicate that parent must
-	 * do a cleanup. Unless we are ROOT an there is way to go upper.
+	 * All subtree is empty - just return TRUE to indicate that parent
+	 * must do a cleanup. Unless we are ROOT an there is way to go upper.
 	 */
 
-	if(hasEmptyChild && !hasNonEmptyChild && !isRoot)
+	if (hasEmptyChild && !hasNonEmptyChild && !isRoot)
 		return TRUE;
 
-	if(hasEmptyChild)
+	if (hasEmptyChild)
 	{
 		DataPageDeleteStack root,
 				   *ptr,
@@ -333,12 +333,12 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
 		if (scan->kill_prior_tuple)
 		{
 			/*
-			 * Yes, so remember it for later. (We'll deal with all such
-			 * tuples at once right after leaving the index page or at
-			 * end of scan.) In case if caller reverses the indexscan
-			 * direction it is quite possible that the same item might
-			 * get entered multiple times. But, we don't detect that;
-			 * instead, we just forget any excess entries.
+			 * Yes, so remember it for later. (We'll deal with all such tuples
+			 * at once right after leaving the index page or at end of scan.)
+			 * In case if caller reverses the indexscan direction it is quite
+			 * possible that the same item might get entered multiple times.
+			 * But, we don't detect that; instead, we just forget any excess
+			 * entries.
 			 */
 			if (so->killedItems == NULL)
 				so->killedItems = palloc(MaxIndexTuplesPerPage *
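
The comment describes deferred LP_DEAD bookkeeping: the array is allocated lazily and recording simply stops once it is full. A sketch of that shape (the member names are assumptions based on the context lines, not quotations):

    if (so->killedItems == NULL)
        so->killedItems = palloc(MaxIndexTuplesPerPage *
                                 sizeof(HashScanPosItem));
    if (so->numKilled < MaxIndexTuplesPerPage)
        so->killedItems[so->numKilled++] = so->hashso_curpos;  /* assumed member */

"Forget any excess entries" is literal: once numKilled reaches MaxIndexTuplesPerPage, further kills on the page are silently dropped, which is safe because LP_DEAD is only a hint.
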
@@ -477,9 +477,8 @@ hashrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
 	Relation	rel = scan->indexRelation;
 
 	/*
-	 * Before leaving current page, deal with any killed items.
-	 * Also, ensure that we acquire lock on current page before
-	 * calling _hash_kill_items.
+	 * Before leaving current page, deal with any killed items. Also, ensure
+	 * that we acquire lock on current page before calling _hash_kill_items.
 	 */
 	if (so->numKilled > 0)
 	{
@@ -516,9 +515,8 @@ hashendscan(IndexScanDesc scan)
 	Relation	rel = scan->indexRelation;
 
 	/*
-	 * Before leaving current page, deal with any killed items.
-	 * Also, ensure that we acquire lock on current page before
-	 * calling _hash_kill_items.
+	 * Before leaving current page, deal with any killed items. Also, ensure
+	 * that we acquire lock on current page before calling _hash_kill_items.
 	 */
 	if (so->numKilled > 0)
 	{
@@ -889,8 +887,8 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 
 		/*
 		 * Let us mark the page as clean if vacuum removes the DEAD tuples
-		 * from an index page. We do this by clearing LH_PAGE_HAS_DEAD_TUPLES
-		 * flag.
+		 * from an index page. We do this by clearing
+		 * LH_PAGE_HAS_DEAD_TUPLES flag.
 		 */
 		if (tuples_removed && *tuples_removed > 0 &&
 			H_HAS_DEAD_TUPLES(opaque))
@@ -984,9 +984,9 @@ hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
 		return latestRemovedXid;
 
 	/*
-	 * Check if WAL replay has reached a consistent database state. If not,
-	 * we must PANIC. See the definition of btree_xlog_delete_get_latestRemovedXid
-	 * for more details.
+	 * Check if WAL replay has reached a consistent database state. If not, we
+	 * must PANIC. See the definition of
+	 * btree_xlog_delete_get_latestRemovedXid for more details.
 	 */
 	if (!reachedConsistency)
 		elog(PANIC, "hash_xlog_vacuum_get_latestRemovedXid: cannot operate with inconsistent data");
@@ -1146,8 +1146,8 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
 		}
 
 		/*
-		 * Mark the page as not containing any LP_DEAD items. See comments
-		 * in _hash_vacuum_one_page() for details.
+		 * Mark the page as not containing any LP_DEAD items. See comments in
+		 * _hash_vacuum_one_page() for details.
 		 */
 		pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
 		pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
@@ -63,8 +63,8 @@ restart_insert:
 
 	/*
 	 * Read the metapage. We don't lock it yet; HashMaxItemSize() will
-	 * examine pd_pagesize_version, but that can't change so we can examine
-	 * it without a lock.
+	 * examine pd_pagesize_version, but that can't change so we can examine it
+	 * without a lock.
 	 */
 	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_NOLOCK, LH_META_PAGE);
 	metapage = BufferGetPage(metabuf);
@@ -126,10 +126,9 @@ restart_insert:
 			BlockNumber nextblkno;
 
 			/*
-			 * Check if current page has any DEAD tuples. If yes,
-			 * delete these tuples and see if we can get a space for
-			 * the new item to be inserted before moving to the next
-			 * page in the bucket chain.
+			 * Check if current page has any DEAD tuples. If yes, delete these
+			 * tuples and see if we can get a space for the new item to be
+			 * inserted before moving to the next page in the bucket chain.
 			 */
 			if (H_HAS_DEAD_TUPLES(pageopaque))
 			{
@@ -360,8 +359,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 	if (ndeletable > 0)
 	{
 		/*
-		 * Write-lock the meta page so that we can decrement
-		 * tuple count.
+		 * Write-lock the meta page so that we can decrement tuple count.
 		 */
 		LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
 
@@ -374,8 +372,8 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 		 * Mark the page as not containing any LP_DEAD items. This is not
 		 * certainly true (there might be some that have recently been marked,
 		 * but weren't included in our target-item list), but it will almost
-		 * always be true and it doesn't seem worth an additional page scan
-		 * to check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint
+		 * always be true and it doesn't seem worth an additional page scan to
+		 * check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint
 		 * anyway.
 		 */
 		pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
@@ -401,9 +399,9 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 			XLogRegisterData((char *) &xlrec, SizeOfHashVacuumOnePage);
 
 			/*
-			 * We need the target-offsets array whether or not we store the whole
-			 * buffer, to allow us to find the latestRemovedXid on a standby
-			 * server.
+			 * We need the target-offsets array whether or not we store the
+			 * whole buffer, to allow us to find the latestRemovedXid on a
+			 * standby server.
 			 */
 			XLogRegisterData((char *) deletable,
 							 ndeletable * sizeof(OffsetNumber));
@@ -417,9 +415,10 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 		}
 
 		END_CRIT_SECTION();
 
 		/*
-		 * Releasing write lock on meta page as we have updated
-		 * the tuple count.
+		 * Releasing write lock on meta page as we have updated the tuple
+		 * count.
 		 */
 		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 	}
@@ -177,8 +177,8 @@ _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag,
 	pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
 
 	/*
-	 * Set hasho_prevblkno with current hashm_maxbucket. This value will
-	 * be used to validate cached HashMetaPageData. See
+	 * Set hasho_prevblkno with current hashm_maxbucket. This value will be
+	 * used to validate cached HashMetaPageData. See
 	 * _hash_getbucketbuf_from_hashkey().
 	 */
 	pageopaque->hasho_prevblkno = max_bucket;
@@ -509,8 +509,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
 	 * Choose the number of initial bucket pages to match the fill factor
 	 * given the estimated number of tuples. We round up the result to the
 	 * total number of buckets which has to be allocated before using its
-	 * _hashm_spare element. However always force at least 2 bucket pages.
-	 * The upper limit is determined by considerations explained in
+	 * _hashm_spare element. However always force at least 2 bucket pages. The
+	 * upper limit is determined by considerations explained in
 	 * _hash_expandtable().
 	 */
 	dnumbuckets = num_tuples / ffactor;
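
To make the rounding rule concrete, this is roughly how the bucket count is derived just below the comment (the clamping constants and helper names are taken from the surrounding PG 10 hash code; treat this as a sketch):

    dnumbuckets = num_tuples / ffactor;
    if (dnumbuckets <= 2.0)
        num_buckets = 2;            /* always force at least 2 bucket pages */
    else if (dnumbuckets >= (double) 0x40000000)
        num_buckets = 0x40000000;   /* upper limit, see _hash_expandtable() */
    else
        num_buckets = _hash_get_totalbuckets(_hash_spareindex(dnumbuckets));
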
@@ -568,8 +568,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
 	metap->hashm_maxbucket = num_buckets - 1;
 
 	/*
-	 * Set highmask as next immediate ((2 ^ x) - 1), which should be sufficient
-	 * to cover num_buckets.
+	 * Set highmask as next immediate ((2 ^ x) - 1), which should be
+	 * sufficient to cover num_buckets.
 	 */
 	metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
 	metap->hashm_lowmask = (metap->hashm_highmask >> 1);
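
A worked example of the mask formula above: with num_buckets = 1000, _hash_log2(1001) is 10 (the smallest x with 2^x >= 1001), so hashm_highmask = (1 << 10) - 1 = 1023 and hashm_lowmask = 1023 >> 1 = 511 — that is, highmask is the next ((2 ^ x) - 1) value at or above num_buckets, exactly as the comment says.
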
@@ -748,8 +748,8 @@ restart_expand:
 	{
 		/*
 		 * Copy bucket mapping info now; refer to the comment in code below
-		 * where we copy this information before calling _hash_splitbucket
-		 * to see why this is okay.
+		 * where we copy this information before calling _hash_splitbucket to
+		 * see why this is okay.
 		 */
 		maxbucket = metap->hashm_maxbucket;
 		highmask = metap->hashm_highmask;
@@ -792,8 +792,7 @@ restart_expand:
 		 * We treat allocation of buckets as a separate WAL-logged action.
 		 * Even if we fail after this operation, won't leak bucket pages;
 		 * rather, the next split will consume this space. In any case, even
-		 * without failure we don't use all the space in one split
-		 * operation.
+		 * without failure we don't use all the space in one split operation.
 		 */
 		buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
 		if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))
@@ -870,10 +869,9 @@ restart_expand:
 
 	/*
 	 * Mark the old bucket to indicate that split is in progress. (At
-	 * operation end, we will clear the split-in-progress flag.) Also,
-	 * for a primary bucket page, hasho_prevblkno stores the number of
-	 * buckets that existed as of the last split, so we must update that
-	 * value here.
+	 * operation end, we will clear the split-in-progress flag.) Also, for a
+	 * primary bucket page, hasho_prevblkno stores the number of buckets that
+	 * existed as of the last split, so we must update that value here.
 	 */
 	oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
 	oopaque->hasho_prevblkno = maxbucket;
@@ -1008,8 +1006,8 @@ _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
 
 	/*
 	 * Initialize the page. Just zeroing the page won't work; see
-	 * _hash_freeovflpage for similar usage. We take care to make the
-	 * special space valid for the benefit of tools such as pageinspect.
+	 * _hash_freeovflpage for similar usage. We take care to make the special
+	 * space valid for the benefit of tools such as pageinspect.
 	 */
 	_hash_pageinit(page, BLCKSZ);
 
@@ -1479,10 +1477,10 @@ _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
 		char	   *cache = NULL;
 
 		/*
-		 * It's important that we don't set rd_amcache to an invalid
-		 * value. Either MemoryContextAlloc or _hash_getbuf could fail,
-		 * so don't install a pointer to the newly-allocated storage in the
-		 * actual relcache entry until both have succeeeded.
+		 * It's important that we don't set rd_amcache to an invalid value.
+		 * Either MemoryContextAlloc or _hash_getbuf could fail, so don't
+		 * install a pointer to the newly-allocated storage in the actual
+		 * relcache entry until both have succeeeded.
 		 */
 		if (rel->rd_amcache == NULL)
 			cache = MemoryContextAlloc(rel->rd_indexcxt,
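
(The "succeeeded" typo is upstream's and is preserved by the re-wrap.) The invariant the comment states — never let rd_amcache point at half-initialized storage — comes down to an allocate-then-install ordering. A sketch of the shape, with the copy step elided and the size assumed:

    char   *cache = NULL;

    if (rel->rd_amcache == NULL)
        cache = MemoryContextAlloc(rel->rd_indexcxt,
                                   sizeof(HashMetaPageData));
    metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
    /* ... copy the metapage contents into cache ... */
    if (cache != NULL)
        rel->rd_amcache = cache;    /* install only after both steps worked */

If either the allocation or the buffer read throws an error, the relcache entry is untouched and the next call simply retries.
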
@@ -531,7 +531,8 @@ _hash_kill_items(IndexScanDesc scan)
 	HashScanOpaque so = (HashScanOpaque) scan->opaque;
 	Page		page;
 	HashPageOpaque opaque;
-	OffsetNumber offnum, maxoff;
+	OffsetNumber offnum,
+				maxoff;
 	int			numKilled = so->numKilled;
 	int			i;
 	bool		killedsomething = false;
@@ -540,8 +541,8 @@ _hash_kill_items(IndexScanDesc scan)
 	Assert(so->killedItems != NULL);
 
 	/*
-	 * Always reset the scan state, so we don't look for same
-	 * items on other pages.
+	 * Always reset the scan state, so we don't look for same items on other
+	 * pages.
 	 */
 	so->numKilled = 0;
 
@@ -570,8 +571,8 @@ _hash_kill_items(IndexScanDesc scan)
 	}
 
 	/*
-	 * Since this can be redone later if needed, mark as dirty hint.
-	 * Whenever we mark anything LP_DEAD, we also set the page's
+	 * Since this can be redone later if needed, mark as dirty hint. Whenever
+	 * we mark anything LP_DEAD, we also set the page's
 	 * LH_PAGE_HAS_DEAD_TUPLES flag, which is likewise just a hint.
 	 */
 	if (killedsomething)
@@ -3518,10 +3518,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 	 *
 	 * For HOT considerations, this is wasted effort if we fail to update or
 	 * have to put the new tuple on a different page. But we must compute the
-	 * list before obtaining buffer lock --- in the worst case, if we are doing
-	 * an update on one of the relevant system catalogs, we could deadlock if
-	 * we try to fetch the list later. In any case, the relcache caches the
-	 * data so this is usually pretty cheap.
+	 * list before obtaining buffer lock --- in the worst case, if we are
+	 * doing an update on one of the relevant system catalogs, we could
+	 * deadlock if we try to fetch the list later. In any case, the relcache
+	 * caches the data so this is usually pretty cheap.
 	 *
 	 * We also need columns used by the replica identity and columns that are
 	 * considered the "key" of rows in the table.
@@ -3540,15 +3540,16 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 	page = BufferGetPage(buffer);
 
 	interesting_attrs = NULL;
+
 	/*
 	 * If the page is already full, there is hardly any chance of doing a HOT
 	 * update on this page. It might be wasteful effort to look for index
-	 * column updates only to later reject HOT updates for lack of space in the
-	 * same page. So we be conservative and only fetch hot_attrs if the page is
-	 * not already full. Since we are already holding a pin on the buffer,
-	 * there is no chance that the buffer can get cleaned up concurrently and
-	 * even if that was possible, in the worst case we lose a chance to do a
-	 * HOT update.
+	 * column updates only to later reject HOT updates for lack of space in
+	 * the same page. So we be conservative and only fetch hot_attrs if the
+	 * page is not already full. Since we are already holding a pin on the
+	 * buffer, there is no chance that the buffer can get cleaned up
+	 * concurrently and even if that was possible, in the worst case we lose a
+	 * chance to do a HOT update.
 	 */
 	if (!PageIsFull(page))
 	{
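
The "only fetch hot_attrs if the page is not already full" policy translates into gating the bitmapset collection on PageIsFull(). A sketch of the branch this comment introduces — bms_add_members is the real bitmapset API, while the attr variables are assumed from the surrounding context:

    if (!PageIsFull(page))
    {
        interesting_attrs = bms_add_members(interesting_attrs, hot_attrs);
        interesting_attrs = bms_add_members(interesting_attrs, key_attrs);
        interesting_attrs = bms_add_members(interesting_attrs, id_attrs);
    }

When the page is full the set stays empty and heap_update skips the per-column comparison work, at worst giving up a HOT opportunity, as the comment argues.
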
@@ -289,11 +289,11 @@ btbuildempty(Relation index)
 	_bt_initmetapage(metapage, P_NONE, 0);
 
 	/*
-	 * Write the page and log it. It might seem that an immediate sync
-	 * would be sufficient to guarantee that the file exists on disk, but
-	 * recovery itself might remove it while replaying, for example, an
-	 * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record. Therefore, we
-	 * need this even when wal_level=minimal.
+	 * Write the page and log it. It might seem that an immediate sync would
+	 * be sufficient to guarantee that the file exists on disk, but recovery
+	 * itself might remove it while replaying, for example, an
+	 * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record. Therefore, we need
+	 * this even when wal_level=minimal.
 	 */
 	PageSetChecksumInplace(metapage, BTREE_METAPAGE);
 	smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE,
@@ -164,10 +164,10 @@ spgbuildempty(Relation index)
 
 	/*
 	 * Write the page and log it unconditionally. This is important
-	 * particularly for indexes created on tablespaces and databases
-	 * whose creation happened after the last redo pointer as recovery
-	 * removes any of their existing content when the corresponding
-	 * create records are replayed.
+	 * particularly for indexes created on tablespaces and databases whose
+	 * creation happened after the last redo pointer as recovery removes any
+	 * of their existing content when the corresponding create records are
+	 * replayed.
 	 */
 	PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO);
 	smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO,
@@ -683,10 +683,10 @@ TruncateCLOG(TransactionId oldestXact, Oid oldestxid_datoid)
 								ShmemVariableCache->oldestXid));
 
 	/*
-	 * Write XLOG record and flush XLOG to disk. We record the oldest xid we're
-	 * keeping information about here so we can ensure that it's always ahead
-	 * of clog truncation in case we crash, and so a standby finds out the new
-	 * valid xid before the next checkpoint.
+	 * Write XLOG record and flush XLOG to disk. We record the oldest xid
+	 * we're keeping information about here so we can ensure that it's always
+	 * ahead of clog truncation in case we crash, and so a standby finds out
+	 * the new valid xid before the next checkpoint.
 	 */
 	WriteTruncateXlogRec(cutoffPage, oldestXact, oldestxid_datoid);
 
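
The ordering requirement in this comment — record the new oldest xid in WAL before the clog files actually shrink — is worth spelling out. A hedged sketch of the sequence around the context line (the advance/truncate helpers are the ones PG 10's clog code uses; confirm against clog.c before relying on details):

    /* WAL-log first, so neither a crash nor a standby ever sees
     * truncated clog without the new oldest xid */
    WriteTruncateXlogRec(cutoffPage, oldestXact, oldestxid_datoid);
    AdvanceOldestClogXid(oldestXact);
    SimpleLruTruncate(ClogCtl, cutoffPage);
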
@@ -748,8 +748,8 @@ ShutdownCommitTs(void)
 	SimpleLruFlush(CommitTsCtl, false);
 
 	/*
-	 * fsync pg_commit_ts to ensure that any files flushed previously are durably
-	 * on disk.
+	 * fsync pg_commit_ts to ensure that any files flushed previously are
+	 * durably on disk.
 	 */
 	fsync_fname("pg_commit_ts", true);
 }
@@ -764,8 +764,8 @@ CheckPointCommitTs(void)
 	SimpleLruFlush(CommitTsCtl, true);
 
 	/*
-	 * fsync pg_commit_ts to ensure that any files flushed previously are durably
-	 * on disk.
+	 * fsync pg_commit_ts to ensure that any files flushed previously are
+	 * durably on disk.
 	 */
 	fsync_fname("pg_commit_ts", true);
 }
@@ -87,9 +87,9 @@ SubTransSetParent(TransactionId xid, TransactionId parent)
 	ptr += entryno;
 
 	/*
-	 * It's possible we'll try to set the parent xid multiple times
-	 * but we shouldn't ever be changing the xid from one valid xid
-	 * to another valid xid, which would corrupt the data structure.
+	 * It's possible we'll try to set the parent xid multiple times but we
+	 * shouldn't ever be changing the xid from one valid xid to another valid
+	 * xid, which would corrupt the data structure.
 	 */
 	if (*ptr != parent)
 	{
@@ -162,9 +162,9 @@ SubTransGetTopmostTransaction(TransactionId xid)
 		parentXid = SubTransGetParent(parentXid);
 
 		/*
-		 * By convention the parent xid gets allocated first, so should
-		 * always precede the child xid. Anything else points to a corrupted
-		 * data structure that could lead to an infinite loop, so exit.
+		 * By convention the parent xid gets allocated first, so should always
+		 * precede the child xid. Anything else points to a corrupted data
+		 * structure that could lead to an infinite loop, so exit.
 		 */
 		if (!TransactionIdPrecedes(parentXid, previousXid))
 			elog(ERROR, "pg_subtrans contains invalid entry: xid %u points to parent xid %u",
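
The corruption guard in this hunk only makes sense inside the parent-walking loop, so here is the loop's shape for orientation (assembled from the context lines, not a quotation):

    parentXid = xid;
    do
    {
        previousXid = parentXid;
        if (TransactionIdPrecedes(parentXid, TransactionXmin))
            break;
        parentXid = SubTransGetParent(parentXid);

        /* parents are allocated before children; anything else is corrupt */
        if (!TransactionIdPrecedes(parentXid, previousXid))
            elog(ERROR, "pg_subtrans contains invalid entry: xid %u points to parent xid %u",
                 previousXid, parentXid);
    } while (TransactionIdIsValid(parentXid));
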
@@ -1675,7 +1675,10 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
 	LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
 	for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
 	{
-		/* Note that we are using gxact not pgxact so this works in recovery also */
+		/*
+		 * Note that we are using gxact not pgxact so this works in recovery
+		 * also
+		 */
 		GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
 
 		if ((gxact->valid || gxact->inredo) &&
@@ -1920,13 +1923,13 @@ RecoverPreparedTransactions(void)
 		xid = gxact->xid;
 
 		/*
-		 * Reconstruct subtrans state for the transaction --- needed
-		 * because pg_subtrans is not preserved over a restart. Note that
-		 * we are linking all the subtransactions directly to the
-		 * top-level XID; there may originally have been a more complex
-		 * hierarchy, but there's no need to restore that exactly.
-		 * It's possible that SubTransSetParent has been set before, if
-		 * the prepared transaction generated xid assignment records.
+		 * Reconstruct subtrans state for the transaction --- needed because
+		 * pg_subtrans is not preserved over a restart. Note that we are
+		 * linking all the subtransactions directly to the top-level XID;
+		 * there may originally have been a more complex hierarchy, but
+		 * there's no need to restore that exactly. It's possible that
+		 * SubTransSetParent has been set before, if the prepared transaction
+		 * generated xid assignment records.
 		 */
 		buf = ProcessTwoPhaseBuffer(xid,
 									gxact->prepare_start_lsn,
@@ -1949,9 +1952,8 @@ RecoverPreparedTransactions(void)
 		bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage));
 
 		/*
-		 * Recreate its GXACT and dummy PGPROC. But, check whether
-		 * it was added in redo and already has a shmem entry for
-		 * it.
+		 * Recreate its GXACT and dummy PGPROC. But, check whether it was
+		 * added in redo and already has a shmem entry for it.
 		 */
 		LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
 		MarkAsPreparingGuts(gxact, xid, gid,
@@ -1980,9 +1982,8 @@ RecoverPreparedTransactions(void)
 		StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids);
 
 		/*
-		 * We're done with recovering this transaction. Clear
-		 * MyLockedGxact, like we do in PrepareTransaction() during normal
-		 * operation.
+		 * We're done with recovering this transaction. Clear MyLockedGxact,
+		 * like we do in PrepareTransaction() during normal operation.
 		 */
 		PostPrepare_Twophase();
 
@@ -2098,8 +2099,8 @@ ProcessTwoPhaseBuffer(TransactionId xid,
 	}
 
 	/*
-	 * Examine subtransaction XIDs ... they should all follow main
-	 * XID, and they may force us to advance nextXid.
+	 * Examine subtransaction XIDs ... they should all follow main XID, and
+	 * they may force us to advance nextXid.
 	 */
 	subxids = (TransactionId *) (buf +
 								 MAXALIGN(sizeof(TwoPhaseFileHeader)) +
@@ -2175,8 +2176,9 @@ RecordTransactionCommitPrepared(TransactionId xid,
 	MyPgXact->delayChkpt = true;
 
 	/*
-	 * Emit the XLOG commit record. Note that we mark 2PC commits as potentially
-	 * having AccessExclusiveLocks since we don't know whether or not they do.
+	 * Emit the XLOG commit record. Note that we mark 2PC commits as
+	 * potentially having AccessExclusiveLocks since we don't know whether or
+	 * not they do.
 	 */
 	recptr = XactLogCommitRecord(committs,
 								 nchildren, children, nrels, rels,
@@ -2260,8 +2262,9 @@ RecordTransactionAbortPrepared(TransactionId xid,
 	START_CRIT_SECTION();
 
 	/*
-	 * Emit the XLOG commit record. Note that we mark 2PC aborts as potentially
-	 * having AccessExclusiveLocks since we don't know whether or not they do.
+	 * Emit the XLOG commit record. Note that we mark 2PC aborts as
+	 * potentially having AccessExclusiveLocks since we don't know whether or
+	 * not they do.
 	 */
 	recptr = XactLogAbortRecord(GetCurrentTimestamp(),
 								nchildren, children,
@@ -2315,8 +2318,8 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
  *
  * This creates a gxact struct and puts it into the active array.
  *
- * In redo, this struct is mainly used to track PREPARE/COMMIT entries
- * in shared memory. Hence, we only fill up the bare minimum contents here.
+ * In redo, this struct is mainly used to track PREPARE/COMMIT entries in
+ * shared memory. Hence, we only fill up the bare minimum contents here.
  * The gxact also gets marked with gxact->inredo set to true to indicate
  * that it got added in the redo phase
  */
@@ -2641,7 +2641,8 @@ CleanupTransaction(void)
 	 * do abort cleanup processing
 	 */
 	AtCleanup_Portals();		/* now safe to release portal memory */
-	AtEOXact_Snapshot(false, true); /* and release the transaction's snapshots */
+	AtEOXact_Snapshot(false, true); /* and release the transaction's
+									 * snapshots */
 
 	CurrentResourceOwner = NULL;	/* and resource owner */
 	if (TopTransactionResourceOwner)
@@ -5646,8 +5647,8 @@ xact_redo(XLogReaderState *record)
 	else if (info == XLOG_XACT_PREPARE)
 	{
 		/*
-		 * Store xid and start/end pointers of the WAL record in
-		 * TwoPhaseState gxact entry.
+		 * Store xid and start/end pointers of the WAL record in TwoPhaseState
+		 * gxact entry.
 		 */
 		PrepareRedoAdd(XLogRecGetData(record),
 					   record->ReadRecPtr,
@@ -550,13 +550,12 @@ typedef struct XLogCtlInsert
 	bool		fullPageWrites;
 
 	/*
-	 * exclusiveBackupState indicates the state of an exclusive backup
-	 * (see comments of ExclusiveBackupState for more details).
-	 * nonExclusiveBackups is a counter indicating the number of streaming
-	 * base backups currently in progress. forcePageWrites is set to true
-	 * when either of these is non-zero. lastBackupStart is the latest
-	 * checkpoint redo location used as a starting point for an online
-	 * backup.
+	 * exclusiveBackupState indicates the state of an exclusive backup (see
+	 * comments of ExclusiveBackupState for more details). nonExclusiveBackups
+	 * is a counter indicating the number of streaming base backups currently
+	 * in progress. forcePageWrites is set to true when either of these is
+	 * non-zero. lastBackupStart is the latest checkpoint redo location used
+	 * as a starting point for an online backup.
 	 */
 	ExclusiveBackupState exclusiveBackupState;
 	int			nonExclusiveBackups;
@@ -1405,7 +1404,8 @@ checkXLogConsistency(XLogReaderState *record)
 
 		/*
 		 * If the block LSN is already ahead of this WAL record, we can't
-		 * expect contents to match. This can happen if recovery is restarted.
+		 * expect contents to match. This can happen if recovery is
+		 * restarted.
 		 */
 		if (PageGetLSN(replay_image_masked) > record->EndRecPtr)
 			continue;
@@ -4975,10 +4975,10 @@ BootStrapXLOG(void)
 	sysidentifier |= getpid() & 0xFFF;
 
 	/*
-	 * Generate a random nonce. This is used for authentication requests
-	 * that will fail because the user does not exist. The nonce is used to
-	 * create a genuine-looking password challenge for the non-existent user,
-	 * in lieu of an actual stored password.
+	 * Generate a random nonce. This is used for authentication requests that
+	 * will fail because the user does not exist. The nonce is used to create
+	 * a genuine-looking password challenge for the non-existent user, in lieu
+	 * of an actual stored password.
 	 */
 	if (!pg_backend_random(mock_auth_nonce, MOCK_AUTH_NONCE_LEN))
 		ereport(PANIC,
@@ -6352,8 +6352,8 @@ StartupXLOG(void)
 	xlogreader->system_identifier = ControlFile->system_identifier;
 
 	/*
-	 * Allocate pages dedicated to WAL consistency checks, those had better
-	 * be aligned.
+	 * Allocate pages dedicated to WAL consistency checks, those had better be
+	 * aligned.
 	 */
 	replay_image_masked = (char *) palloc(BLCKSZ);
 	master_image_masked = (char *) palloc(BLCKSZ);
@@ -6687,21 +6687,21 @@ StartupXLOG(void)
 
 	/*
 	 * Copy any missing timeline history files between 'now' and the recovery
-	 * target timeline from archive to pg_wal. While we don't need those
-	 * files ourselves - the history file of the recovery target timeline
-	 * covers all the previous timelines in the history too - a cascading
-	 * standby server might be interested in them. Or, if you archive the WAL
-	 * from this server to a different archive than the master, it'd be good
-	 * for all the history files to get archived there after failover, so that
-	 * you can use one of the old timelines as a PITR target. Timeline history
-	 * files are small, so it's better to copy them unnecessarily than not
-	 * copy them and regret later.
+	 * target timeline from archive to pg_wal. While we don't need those files
+	 * ourselves - the history file of the recovery target timeline covers all
+	 * the previous timelines in the history too - a cascading standby server
+	 * might be interested in them. Or, if you archive the WAL from this
+	 * server to a different archive than the master, it'd be good for all the
+	 * history files to get archived there after failover, so that you can use
+	 * one of the old timelines as a PITR target. Timeline history files are
+	 * small, so it's better to copy them unnecessarily than not copy them and
+	 * regret later.
 	 */
 	restoreTimeLineHistoryFiles(ThisTimeLineID, recoveryTargetTLI);
 
 	/*
-	 * Before running in recovery, scan pg_twophase and fill in its status
-	 * to be able to work on entries generated by redo. Doing a scan before
+	 * Before running in recovery, scan pg_twophase and fill in its status to
+	 * be able to work on entries generated by redo. Doing a scan before
 	 * taking any recovery action has the merit to discard any 2PC files that
 	 * are newer than the first record to replay, saving from any conflicts at
 	 * replay. This avoids as well any subsequent scans when doing recovery
@@ -7426,7 +7426,7 @@ StartupXLOG(void)
 		snprintf(reason, sizeof(reason),
 				 "%s LSN %X/%X\n",
 				 recoveryStopAfter ? "after" : "before",
-				 (uint32 ) (recoveryStopLSN >> 32),
+				 (uint32) (recoveryStopLSN >> 32),
 				 (uint32) recoveryStopLSN);
 	else if (recoveryTarget == RECOVERY_TARGET_NAME)
 		snprintf(reason, sizeof(reason),
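
The whitespace fix above touches PostgreSQL's standard idiom for printing a 64-bit LSN: split it into two 32-bit halves and format them as %X/%X. In isolation:

    XLogRecPtr  lsn = recoveryStopLSN;
    char        buf[32];

    snprintf(buf, sizeof(buf), "%X/%X",
             (uint32) (lsn >> 32), (uint32) lsn);

Both casts are spelled (uint32) with no stray space inside the parentheses, which is exactly what pgindent normalizes here.
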
@@ -9645,6 +9645,7 @@ xlog_redo(XLogReaderState *record)
 
 		MultiXactAdvanceOldest(checkPoint.oldestMulti,
 							   checkPoint.oldestMultiDB);
+
 		/*
 		 * No need to set oldestClogXid here as well; it'll be set when we
 		 * redo an xl_clog_truncate if it changed since initialization.
@@ -10238,8 +10239,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
 	if (exclusive)
 	{
 		/*
-		 * At first, mark that we're now starting an exclusive backup,
-		 * to ensure that there are no other sessions currently running
+		 * At first, mark that we're now starting an exclusive backup, to
+		 * ensure that there are no other sessions currently running
 		 * pg_start_backup() or pg_stop_backup().
 		 */
 		if (XLogCtl->Insert.exclusiveBackupState != EXCLUSIVE_BACKUP_NONE)
@@ -10505,8 +10506,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
 		{
 			/*
 			 * Check for existing backup label --- implies a backup is already
-			 * running. (XXX given that we checked exclusiveBackupState above,
-			 * maybe it would be OK to just unlink any such label file?)
+			 * running. (XXX given that we checked exclusiveBackupState
+			 * above, maybe it would be OK to just unlink any such label
+			 * file?)
 			 */
 			if (stat(BACKUP_LABEL_FILE, &stat_buf) != 0)
 			{
@@ -10727,8 +10729,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
 	if (exclusive)
 	{
 		/*
-		 * At first, mark that we're now stopping an exclusive backup,
-		 * to ensure that there are no other sessions currently running
+		 * At first, mark that we're now stopping an exclusive backup, to
+		 * ensure that there are no other sessions currently running
 		 * pg_start_backup() or pg_stop_backup().
 		 */
 		WALInsertLockAcquireExclusive();
@@ -10790,8 +10792,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
 		durable_unlink(BACKUP_LABEL_FILE, ERROR);
 
 		/*
-		 * Remove tablespace_map file if present, it is created only if there
-		 * are tablespaces.
+		 * Remove tablespace_map file if present, it is created only if
+		 * there are tablespaces.
 		 */
 		durable_unlink(TABLESPACE_MAP, DEBUG1);
 	}
@@ -10978,9 +10980,9 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
 	 * archived before returning. If archiving isn't enabled, the required WAL
 	 * needs to be transported via streaming replication (hopefully with
 	 * wal_keep_segments set high enough), or some more exotic mechanism like
-	 * polling and copying files from pg_wal with script. We have no
-	 * knowledge of those mechanisms, so it's up to the user to ensure that he
-	 * gets all the required WAL.
+	 * polling and copying files from pg_wal with script. We have no knowledge
+	 * of those mechanisms, so it's up to the user to ensure that he gets all
+	 * the required WAL.
 	 *
 	 * We wait until both the last WAL file filled during backup and the
 	 * history file have been archived, and assume that the alphabetic sorting
@@ -10990,8 +10992,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
 	 * We wait forever, since archive_command is supposed to work and we
 	 * assume the admin wanted his backup to work completely. If you don't
 	 * wish to wait, then either waitforarchive should be passed in as false,
-	 * or you can set statement_timeout. Also, some notices are
-	 * issued to clue in anyone who might be doing this interactively.
+	 * or you can set statement_timeout. Also, some notices are issued to
+	 * clue in anyone who might be doing this interactively.
 	 */
 	if (waitforarchive && XLogArchivingActive())
 	{
@@ -11717,8 +11719,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
 					 * little chance that the problem will just go away, but
 					 * PANIC is not good for availability either, especially
 					 * in hot standby mode. So, we treat that the same as
-					 * disconnection, and retry from archive/pg_wal again.
-					 * The WAL in the archive should be identical to what was
+					 * disconnection, and retry from archive/pg_wal again. The
+					 * WAL in the archive should be identical to what was
 					 * streamed, so it's unlikely that it helps, but one can
 					 * hope...
 					 */
@@ -11881,9 +11883,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
 					 * not open already. Also read the timeline history
 					 * file if we haven't initialized timeline history
 					 * yet; it should be streamed over and present in
-					 * pg_wal by now. Use XLOG_FROM_STREAM so that
-					 * source info is set correctly and XLogReceiptTime
-					 * isn't changed.
+					 * pg_wal by now. Use XLOG_FROM_STREAM so that source
+					 * info is set correctly and XLogReceiptTime isn't
+					 * changed.
 					 */
 					if (readFile < 0)
 					{
@@ -156,7 +156,8 @@ pg_stop_backup(PG_FUNCTION_ARGS)
 	 * Exclusive backups were typically started in a different connection, so
 	 * don't try to verify that status of backup is set to
 	 * SESSION_BACKUP_EXCLUSIVE in this function. Actual verification that an
-	 * exclusive backup is in fact running is handled inside do_pg_stop_backup.
+	 * exclusive backup is in fact running is handled inside
+	 * do_pg_stop_backup.
 	 */
 	stoppoint = do_pg_stop_backup(NULL, true, NULL);
 
@@ -507,10 +507,10 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
 	hdr_rdt.data = hdr_scratch;
 
 	/*
-	 * Enforce consistency checks for this record if user is looking for
-	 * it. Do this before at the beginning of this routine to give the
-	 * possibility for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY
-	 * directly for a record.
+	 * Enforce consistency checks for this record if user is looking for it.
+	 * Do this before at the beginning of this routine to give the possibility
+	 * for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY directly for
+	 * a record.
 	 */
 	if (wal_consistency_checking[rmid])
 		info |= XLR_CHECK_CONSISTENCY;
@@ -576,9 +576,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
 			bkpb.fork_flags |= BKPBLOCK_WILL_INIT;
 
 		/*
-		 * If needs_backup is true or WAL checking is enabled for
-		 * current resource manager, log a full-page write for the current
-		 * block.
+		 * If needs_backup is true or WAL checking is enabled for current
+		 * resource manager, log a full-page write for the current block.
 		 */
 		include_image = needs_backup || (info & XLR_CHECK_CONSISTENCY) != 0;
 
@@ -645,8 +644,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
 			bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
 
 			/*
-			 * If WAL consistency checking is enabled for the resource manager of
-			 * this WAL record, a full-page image is included in the record
+			 * If WAL consistency checking is enabled for the resource manager
+			 * of this WAL record, a full-page image is included in the record
 			 * for the block modified. During redo, the full-page is replayed
 			 * only if BKPIMAGE_APPLY is set.
 			 */
@@ -892,8 +892,8 @@ XLogFindNextRecord(XLogReaderState *state, XLogRecPtr RecPtr)
 	 * that, except when caller has explicitly specified the offset that
 	 * falls somewhere there or when we are skipping multi-page
 	 * continuation record. It doesn't matter though because
-	 * ReadPageInternal() is prepared to handle that and will read at least
-	 * short page-header worth of data
+	 * ReadPageInternal() is prepared to handle that and will read at
+	 * least short page-header worth of data
 	 */
 	targetRecOff = tmpRecPtr % XLOG_BLCKSZ;
 
@@ -805,22 +805,23 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
 	Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
 
 	/*
-	 * If the desired page is currently read in and valid, we have nothing to do.
+	 * If the desired page is currently read in and valid, we have nothing to
+	 * do.
 	 *
 	 * The caller should've ensured that it didn't previously advance readOff
-	 * past the valid limit of this timeline, so it doesn't matter if the current
-	 * TLI has since become historical.
+	 * past the valid limit of this timeline, so it doesn't matter if the
+	 * current TLI has since become historical.
 	 */
 	if (lastReadPage == wantPage &&
 		state->readLen != 0 &&
-		lastReadPage + state->readLen >= wantPage + Min(wantLength,XLOG_BLCKSZ-1))
+		lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1))
 		return;
 
 	/*
 	 * If we're reading from the current timeline, it hasn't become historical
 	 * and the page we're reading is after the last page read, we can again
-	 * just carry on. (Seeking backwards requires a check to make sure the older
-	 * page isn't on a prior timeline).
+	 * just carry on. (Seeking backwards requires a check to make sure the
+	 * older page isn't on a prior timeline).
 	 *
 	 * ThisTimeLineID might've become historical since we last looked, but the
 	 * caller is required not to read past the flush limit it saw at the time
@@ -835,8 +836,8 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
 
 	/*
 	 * If we're just reading pages from a previously validated historical
-	 * timeline and the timeline we're reading from is valid until the
-	 * end of the current segment we can just keep reading.
+	 * timeline and the timeline we're reading from is valid until the end of
+	 * the current segment we can just keep reading.
 	 */
 	if (state->currTLIValidUntil != InvalidXLogRecPtr &&
 		state->currTLI != ThisTimeLineID &&
@@ -845,10 +846,10 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
 		return;
 
 	/*
-	 * If we reach this point we're either looking up a page for random access,
-	 * the current timeline just became historical, or we're reading from a new
-	 * segment containing a timeline switch. In all cases we need to determine
-	 * the newest timeline on the segment.
+	 * If we reach this point we're either looking up a page for random
+	 * access, the current timeline just became historical, or we're reading
+	 * from a new segment containing a timeline switch. In all cases we need
+	 * to determine the newest timeline on the segment.
 	 *
 	 * If it's the current timeline we can just keep reading from here unless
	 * we detect a timeline switch that makes the current timeline historical.
@@ -867,7 +868,10 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
 
 		Assert(wantPage / XLogSegSize == endOfSegment / XLogSegSize);
 
-		/* Find the timeline of the last LSN on the segment containing wantPage. */
+		/*
+		 * Find the timeline of the last LSN on the segment containing
+		 * wantPage.
+		 */
 		state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
 		state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
 												  &state->nextTLI);
@@ -879,8 +883,8 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
 
 		elog(DEBUG3, "switched to timeline %u valid until %X/%X",
 			 state->currTLI,
-			 (uint32)(state->currTLIValidUntil >> 32),
-			 (uint32)(state->currTLIValidUntil));
+			 (uint32) (state->currTLIValidUntil >> 32),
+			 (uint32) (state->currTLIValidUntil));
 	}
 }
 
@@ -929,8 +933,8 @@ read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
 		 *
 		 * We have to do it each time through the loop because if we're in
 		 * recovery as a cascading standby, the current timeline might've
-		 * become historical. We can't rely on RecoveryInProgress() because
-		 * in a standby configuration like
+		 * become historical. We can't rely on RecoveryInProgress() because in
+		 * a standby configuration like
 		 *
 		 * A => B => C
 		 *
@@ -938,12 +942,13 @@ read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
 		 * timeline will change while we remain in recovery.
 		 *
 		 * We can't just keep reading from the old timeline as the last WAL
-		 * archive in the timeline will get renamed to .partial by StartupXLOG().
+		 * archive in the timeline will get renamed to .partial by
+		 * StartupXLOG().
 		 *
 		 * If that happens after our caller updated ThisTimeLineID but before
 		 * we actually read the xlog page, we might still try to read from the
-		 * old (now renamed) segment and fail. There's not much we can do about
-		 * this, but it can only happen when we're a leaf of a cascading
+		 * old (now renamed) segment and fail. There's not much we can do
+		 * about this, but it can only happen when we're a leaf of a cascading
 		 * standby whose master gets promoted while we're decoding, so a
 		 * one-off ERROR isn't too bad.
 		 */
@@ -1125,8 +1125,10 @@ doDeletion(const ObjectAddress *object, int flags)
  heap_drop_with_catalog(object->objectId);
  }

- /* for a sequence, in addition to dropping the heap, also
- * delete pg_sequence tuple */
+ /*
+ * for a sequence, in addition to dropping the heap, also
+ * delete pg_sequence tuple
+ */
  if (relKind == RELKIND_SEQUENCE)
  DeleteSequenceTuple(object->objectId);
  break;

@@ -1762,10 +1762,10 @@ heap_drop_with_catalog(Oid relid)
  /*
  * To drop a partition safely, we must grab exclusive lock on its parent,
  * because another backend might be about to execute a query on the parent
- * table. If it relies on previously cached partition descriptor, then
- * it could attempt to access the just-dropped relation as its partition.
- * We must therefore take a table lock strong enough to prevent all
- * queries on the table from proceeding until we commit and send out a
+ * table. If it relies on previously cached partition descriptor, then it
+ * could attempt to access the just-dropped relation as its partition. We
+ * must therefore take a table lock strong enough to prevent all queries
+ * on the table from proceeding until we commit and send out a
  * shared-cache-inval notice that will make them update their index lists.
  */
  tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
@@ -577,9 +577,9 @@ getOwnedSequences(Oid relid, AttrNumber attnum)
  Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);

  /*
- * We assume any auto or internal dependency of a sequence on a column must be
- * what we are looking for. (We need the relkind test because indexes
- * can also have auto dependencies on columns.)
+ * We assume any auto or internal dependency of a sequence on a column
+ * must be what we are looking for. (We need the relkind test because
+ * indexes can also have auto dependencies on columns.)
  */
  if (deprec->classid == RelationRelationId &&
  deprec->objsubid == 0 &&

@@ -207,7 +207,8 @@ static List *
  textarray_to_stringlist(ArrayType *textarray)
  {
  Datum *elems;
- int nelems, i;
+ int nelems,
+ i;
  List *res = NIL;

  deconstruct_array(textarray,

@@ -248,8 +249,8 @@ SetSubscriptionRelState(Oid subid, Oid relid, char state,
  ObjectIdGetDatum(subid));

  /*
- * If the record for given table does not exist yet create new
- * record, otherwise update the existing one.
+ * If the record for given table does not exist yet create new record,
+ * otherwise update the existing one.
  */
  if (!HeapTupleIsValid(tup))
  {

@@ -435,7 +436,7 @@ GetSubscriptionRelations(Oid subid)
  subrel = (Form_pg_subscription_rel) GETSTRUCT(tup);

- relstate = (SubscriptionRelState *)palloc(sizeof(SubscriptionRelState));
+ relstate = (SubscriptionRelState *) palloc(sizeof(SubscriptionRelState));
  relstate->relid = subrel->srrelid;
  relstate->state = subrel->srsubstate;
  relstate->lsn = subrel->srsublsn;

@@ -487,7 +488,7 @@ GetSubscriptionNotReadyRelations(Oid subid)
  subrel = (Form_pg_subscription_rel) GETSTRUCT(tup);

- relstate = (SubscriptionRelState *)palloc(sizeof(SubscriptionRelState));
+ relstate = (SubscriptionRelState *) palloc(sizeof(SubscriptionRelState));
  relstate->relid = subrel->srrelid;
  relstate->state = subrel->srsubstate;
  relstate->lsn = subrel->srsublsn;
@@ -111,7 +111,7 @@ typedef struct CopyStateData
  List *attnumlist; /* integer list of attnums to copy */
  char *filename; /* filename, or NULL for STDIN/STDOUT */
  bool is_program; /* is 'filename' a program to popen? */
- copy_data_source_cb data_source_cb; /* function for reading data*/
+ copy_data_source_cb data_source_cb; /* function for reading data */
  bool binary; /* binary format? */
  bool oids; /* include OIDs? */
  bool freeze; /* freeze rows on loading? */

@@ -2134,7 +2134,8 @@ dbase_redo(XLogReaderState *record)
  * which can happen in some cases.
  *
  * This will lock out walsenders trying to connect to db-specific
- * slots for logical decoding too, so it's safe for us to drop slots.
+ * slots for logical decoding too, so it's safe for us to drop
+ * slots.
  */
  LockSharedObjectForSession(DatabaseRelationId, xlrec->db_id, 0, AccessExclusiveLock);
  ResolveRecoveryConflictWithDatabase(xlrec->db_id);

@@ -336,7 +336,7 @@ defGetStringList(DefElem *def)
  if (nodeTag(def->arg) != T_List)
  elog(ERROR, "unrecognized node type: %d", (int) nodeTag(def->arg));

- foreach(cell, (List *)def->arg)
+ foreach(cell, (List *) def->arg)
  {
  Node *str = (Node *) lfirst(cell);
@@ -328,6 +328,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
  case OBJECT_FUNCTION:
  {
  ObjectWithArgs *owa = castNode(ObjectWithArgs, object);
+
  if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
  !type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
  {

@@ -340,6 +341,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
  case OBJECT_AGGREGATE:
  {
  ObjectWithArgs *owa = castNode(ObjectWithArgs, object);
+
  if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
  !type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
  {

@@ -352,6 +354,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
  case OBJECT_OPERATOR:
  {
  ObjectWithArgs *owa = castNode(ObjectWithArgs, object);
+
  if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
  !type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
  {

@@ -878,8 +878,8 @@ CreateForeignServer(CreateForeignServerStmt *stmt)
  ownerId = GetUserId();

  /*
- * Check that there is no other foreign server by this name.
- * Do nothing if IF NOT EXISTS was enforced.
+ * Check that there is no other foreign server by this name. Do nothing if
+ * IF NOT EXISTS was enforced.
  */
  if (GetForeignServerByName(stmt->servername, true) != NULL)
  {
@@ -74,7 +74,7 @@ parse_publication_options(List *options,
  *publish_delete = true;

  /* Parse options */
- foreach (lc, options)
+ foreach(lc, options)
  {
  DefElem *defel = (DefElem *) lfirst(lc);

@@ -106,9 +106,9 @@ parse_publication_options(List *options,
  errmsg("invalid publish list")));

  /* Process the option list. */
- foreach (lc, publish_list)
+ foreach(lc, publish_list)
  {
- char *publish_opt = (char *)lfirst(lc);
+ char *publish_opt = (char *) lfirst(lc);

  if (strcmp(publish_opt, "insert") == 0)
  *publish_insert = true;

@@ -285,7 +285,7 @@ AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel,
  {
  ListCell *lc;

- foreach (lc, relids)
+ foreach(lc, relids)
  {
  Oid relid = lfirst_oid(lc);

@@ -358,6 +358,7 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel,
  {
  Relation oldrel = heap_open(oldrelid,
  ShareUpdateExclusiveLock);
+
  delrels = lappend(delrels, oldrel);
  }
  }

@@ -366,8 +367,8 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel,
  PublicationDropTables(pubid, delrels, true);

  /*
- * Don't bother calculating the difference for adding, we'll catch
- * and skip existing ones when doing catalog update.
+ * Don't bother calculating the difference for adding, we'll catch and
+ * skip existing ones when doing catalog update.
  */
  PublicationAddTables(pubid, rels, true, stmt);

@@ -632,7 +633,7 @@ PublicationDropTables(Oid pubid, List *rels, bool missing_ok)
  /*
  * Internal workhorse for changing a publication owner
  */
- static void
+ static void
  AlterPublicationOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
  {
  Form_pg_publication form;
@@ -94,7 +94,7 @@ parse_subscription_options(List *options, bool *connect, bool *enabled_given,
  *synchronous_commit = NULL;

  /* Parse options */
- foreach (lc, options)
+ foreach(lc, options)
  {
  DefElem *defel = (DefElem *) lfirst(lc);

@@ -200,8 +200,8 @@ parse_subscription_options(List *options, bool *connect, bool *enabled_given,
  }

  /*
- * Do additional checking for disallowed combination when
- * slot_name = NONE was used.
+ * Do additional checking for disallowed combination when slot_name = NONE
+ * was used.
  */
  if (slot_name && *slot_name_given && !*slot_name)
  {

@@ -412,7 +412,7 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
  * info.
  */
  tables = fetch_table_list(wrconn, publications);
- foreach (lc, tables)
+ foreach(lc, tables)
  {
  RangeVar *rv = (RangeVar *) lfirst(lc);
  Oid relid;

@@ -431,9 +431,9 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
  (errmsg("synchronized table states")));

  /*
- * If requested, create permanent slot for the subscription.
- * We won't use the initial snapshot for anything, so no need
- * to export it.
+ * If requested, create permanent slot for the subscription. We
+ * won't use the initial snapshot for anything, so no need to
+ * export it.
  */
  if (create_slot)
  {
@@ -505,31 +505,31 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
  subrel_states = GetSubscriptionRelations(sub->oid);

  /*
- * Build qsorted array of local table oids for faster lookup.
- * This can potentially contain all tables in the database so
- * speed of lookup is important.
+ * Build qsorted array of local table oids for faster lookup. This can
+ * potentially contain all tables in the database so speed of lookup is
+ * important.
  */
  subrel_local_oids = palloc(list_length(subrel_states) * sizeof(Oid));
  off = 0;
  foreach(lc, subrel_states)
  {
  SubscriptionRelState *relstate = (SubscriptionRelState *) lfirst(lc);

  subrel_local_oids[off++] = relstate->relid;
  }
  qsort(subrel_local_oids, list_length(subrel_states),
  sizeof(Oid), oid_cmp);

  /*
- * Walk over the remote tables and try to match them to locally
- * known tables. If the table is not known locally create a new state
- * for it.
+ * Walk over the remote tables and try to match them to locally known
+ * tables. If the table is not known locally create a new state for it.
  *
  * Also builds array of local oids of remote tables for the next step.
  */
  off = 0;
  pubrel_local_oids = palloc(list_length(pubrel_names) * sizeof(Oid));

- foreach (lc, pubrel_names)
+ foreach(lc, pubrel_names)
  {
  RangeVar *rv = (RangeVar *) lfirst(lc);
  Oid relid;

@@ -556,8 +556,8 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
  }

  /*
- * Next remove state for tables we should not care about anymore using
- * the data we collected above
+ * Next remove state for tables we should not care about anymore using the
+ * data we collected above
  */
  qsort(pubrel_local_oids, list_length(pubrel_names),
  sizeof(Oid), oid_cmp);
@@ -796,9 +796,8 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
  StringInfoData cmd;

  /*
- * Lock pg_subscription with AccessExclusiveLock to ensure
- * that the launcher doesn't restart new worker during dropping
- * the subscription
+ * Lock pg_subscription with AccessExclusiveLock to ensure that the
+ * launcher doesn't restart new worker during dropping the subscription
  */
  rel = heap_open(SubscriptionRelationId, AccessExclusiveLock);

@@ -833,8 +832,8 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
  InvokeObjectDropHook(SubscriptionRelationId, subid, 0);

  /*
- * Lock the subscription so nobody else can do anything with it
- * (including the replication workers).
+ * Lock the subscription so nobody else can do anything with it (including
+ * the replication workers).
  */
  LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock);

@@ -895,7 +894,10 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
  if (originid != InvalidRepOriginId)
  replorigin_drop(originid);

- /* If there is no slot associated with the subscription, we can finish here. */
+ /*
+ * If there is no slot associated with the subscription, we can finish
+ * here.
+ */
  if (!slotname)
  {
  heap_close(rel, NoLock);

@@ -903,8 +905,8 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
  }

  /*
- * Otherwise drop the replication slot at the publisher node using
- * the replication connection.
+ * Otherwise drop the replication slot at the publisher node using the
+ * replication connection.
  */
  load_file("libpqwalreceiver", false);

@@ -923,6 +925,7 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
  PG_TRY();
  {
  WalRcvExecResult *res;
+
  res = walrcv_exec(wrconn, cmd.data, 0, NULL);

  if (res->status != WALRCV_OK_COMMAND)

@@ -1070,7 +1073,7 @@ fetch_table_list(WalReceiverConn *wrconn, List *publications)
  " FROM pg_catalog.pg_publication_tables t\n"
  " WHERE t.pubname IN (");
  first = true;
- foreach (lc, publications)
+ foreach(lc, publications)
  {
  char *pubname = strVal(lfirst(lc));
@@ -643,8 +643,8 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
  descriptor->tdhasoid = (localHasOids || parentOidCount > 0);

  /*
- * If a partitioned table doesn't have the system OID column, then none
- * of its partitions should have it.
+ * If a partitioned table doesn't have the system OID column, then none of
+ * its partitions should have it.
  */
  if (stmt->partbound && parentOidCount == 0 && localHasOids)
  ereport(ERROR,

@@ -1112,9 +1112,9 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid,
  }

  /*
- * Similarly, if we previously locked some other partition's heap, and
- * the name we're looking up no longer refers to that relation, release
- * the now-useless lock.
+ * Similarly, if we previously locked some other partition's heap, and the
+ * name we're looking up no longer refers to that relation, release the
+ * now-useless lock.
  */
  if (relOid != oldRelOid && OidIsValid(state->partParentOid))
  {

@@ -5589,8 +5589,8 @@ static void
  ATPrepDropNotNull(Relation rel, bool recurse, bool recursing)
  {
  /*
- * If the parent is a partitioned table, like check constraints, we do
- * not support removing the NOT NULL while partitions exist.
+ * If the parent is a partitioned table, like check constraints, we do not
+ * support removing the NOT NULL while partitions exist.
  */
  if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
  {
@@ -3412,7 +3412,8 @@ typedef struct AfterTriggersData
  AfterTriggerEventList events; /* deferred-event list */
  int query_depth; /* current query list index */
  AfterTriggerEventList *query_stack; /* events pending from each query */
- Tuplestorestate **fdw_tuplestores; /* foreign tuples for one row from each query */
+ Tuplestorestate **fdw_tuplestores; /* foreign tuples for one row from
+ * each query */
  Tuplestorestate **old_tuplestores; /* all old tuples from each query */
  Tuplestorestate **new_tuplestores; /* all new tuples from each query */
  int maxquerydepth; /* allocated len of above array */

@@ -415,6 +415,7 @@ ExecSupportsMarkRestore(Path *pathnode)
  case T_CustomScan:
  {
  CustomPath *customPath = castNode(CustomPath, pathnode);
+
  if (customPath->flags & CUSTOMPATH_SUPPORT_MARK_RESTORE)
  return true;
  return false;

@@ -608,9 +608,9 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
  /*
  * Also store the per-worker detail.
  *
- * Worker instrumentation should be allocated in the same context as
- * the regular instrumentation information, which is the per-query
- * context. Switch into per-query memory context.
+ * Worker instrumentation should be allocated in the same context as the
+ * regular instrumentation information, which is the per-query context.
+ * Switch into per-query memory context.
  */
  oldcontext = MemoryContextSwitchTo(planstate->state->es_query_cxt);
  ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));
@@ -124,7 +124,7 @@ RelationFindReplTupleByIndex(Relation rel, Oid idxoid,
  Relation idxrel;
  bool found;

- /* Open the index.*/
+ /* Open the index. */
  idxrel = index_open(idxoid, RowExclusiveLock);

  /* Start an index scan. */

@@ -152,8 +152,8 @@ retry:
  snap.xmin : snap.xmax;

  /*
- * If the tuple is locked, wait for locking transaction to finish
- * and retry.
+ * If the tuple is locked, wait for locking transaction to finish and
+ * retry.
  */
  if (TransactionIdIsValid(xwait))
  {

@@ -177,7 +177,7 @@ retry:
  res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
  lockmode,
  LockWaitBlock,
- false /* don't follow updates */,
+ false /* don't follow updates */ ,
  &buf, &hufd);
  /* the tuple slot already has the buffer pinned */
  ReleaseBuffer(buf);

@@ -299,8 +299,8 @@ retry:
  snap.xmin : snap.xmax;

  /*
- * If the tuple is locked, wait for locking transaction to finish
- * and retry.
+ * If the tuple is locked, wait for locking transaction to finish and
+ * retry.
  */
  if (TransactionIdIsValid(xwait))
  {

@@ -324,7 +324,7 @@ retry:
  res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
  lockmode,
  LockWaitBlock,
- false /* don't follow updates */,
+ false /* don't follow updates */ ,
  &buf, &hufd);
  /* the tuple slot already has the buffer pinned */
  ReleaseBuffer(buf);
@@ -129,8 +129,8 @@ ExecInitAppend(Append *node, EState *estate, int eflags)
  Assert(!(eflags & EXEC_FLAG_MARK));

  /*
- * Lock the non-leaf tables in the partition tree controlled by this
- * node. It's a no-op for non-partitioned parent tables.
+ * Lock the non-leaf tables in the partition tree controlled by this node.
+ * It's a no-op for non-partitioned parent tables.
  */
  ExecLockNonLeafAppendTables(node->partitioned_rels, estate);

@@ -506,8 +506,9 @@ BitmapAdjustPrefetchIterator(BitmapHeapScanState *node,
  * In case of shared mode, we can not ensure that the current
  * blockno of the main iterator and that of the prefetch iterator
  * are same. It's possible that whatever blockno we are
- * prefetching will be processed by another process. Therefore, we
- * don't validate the blockno here as we do in non-parallel case.
+ * prefetching will be processed by another process. Therefore,
+ * we don't validate the blockno here as we do in non-parallel
+ * case.
  */
  if (prefetch_iterator)
  tbm_shared_iterate(prefetch_iterator);

@@ -230,17 +230,17 @@ ExecGatherMerge(GatherMergeState *node)
  ResetExprContext(econtext);

  /*
- * Get next tuple, either from one of our workers, or by running the
- * plan ourselves.
+ * Get next tuple, either from one of our workers, or by running the plan
+ * ourselves.
  */
  slot = gather_merge_getnext(node);
  if (TupIsNull(slot))
  return NULL;

  /*
- * form the result tuple using ExecProject(), and return it --- unless
- * the projection produces an empty set, in which case we must loop
- * back around for another tuple
+ * form the result tuple using ExecProject(), and return it --- unless the
+ * projection produces an empty set, in which case we must loop back
+ * around for another tuple
  */
  econtext->ecxt_outertuple = slot;
  return ExecProject(node->ps.ps_ProjInfo);
@@ -534,8 +534,8 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
  HeapTuple tup = NULL;

  /*
- * If we're being asked to generate a tuple from the leader, then we
- * just call ExecProcNode as normal to produce one.
+ * If we're being asked to generate a tuple from the leader, then we just
+ * call ExecProcNode as normal to produce one.
  */
  if (gm_state->nreaders == reader)
  {

@@ -582,8 +582,8 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
  &tuple_buffer->done));

  /*
- * Attempt to read more tuples in nowait mode and store them in
- * the tuple array.
+ * Attempt to read more tuples in nowait mode and store them in the
+ * tuple array.
  */
  if (HeapTupleIsValid(tup))
  form_tuple_array(gm_state, reader);

@@ -72,8 +72,8 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
  Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

  /*
- * Lock the non-leaf tables in the partition tree controlled by this
- * node. It's a no-op for non-partitioned parent tables.
+ * Lock the non-leaf tables in the partition tree controlled by this node.
+ * It's a no-op for non-partitioned parent tables.
  */
  ExecLockNonLeafAppendTables(node->partitioned_rels, estate);
@@ -1815,11 +1815,11 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
  }

  /*
- * Build WITH CHECK OPTION constraints for each leaf partition rel.
- * Note that we didn't build the withCheckOptionList for each partition
- * within the planner, but simple translation of the varattnos for each
- * partition will suffice. This only occurs for the INSERT case;
- * UPDATE/DELETE cases are handled above.
+ * Build WITH CHECK OPTION constraints for each leaf partition rel. Note
+ * that we didn't build the withCheckOptionList for each partition within
+ * the planner, but simple translation of the varattnos for each partition
+ * will suffice. This only occurs for the INSERT case; UPDATE/DELETE
+ * cases are handled above.
  */
  if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0)
  {

@@ -443,8 +443,8 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
  ExecClearTuple(tstate->ss.ss_ScanTupleSlot);

  /*
- * Obtain the value of each column for this row, installing them into the
- * slot; then add the tuple to the tuplestore.
+ * Obtain the value of each column for this row, installing them into
+ * the slot; then add the tuple to the tuplestore.
  */
  for (colno = 0; colno < natts; colno++)
  {
@@ -757,10 +757,10 @@ CheckPWChallengeAuth(Port *port, char **logdetail)
  * If the user does not exist, or has no password or it's expired, we
  * still go through the motions of authentication, to avoid revealing to
  * the client that the user didn't exist. If 'md5' is allowed, we choose
- * whether to use 'md5' or 'scram-sha-256' authentication based on
- * current password_encryption setting. The idea is that most genuine
- * users probably have a password of that type, and if we pretend that
- * this user had a password of that type, too, it "blends in" best.
+ * whether to use 'md5' or 'scram-sha-256' authentication based on current
+ * password_encryption setting. The idea is that most genuine users
+ * probably have a password of that type, and if we pretend that this user
+ * had a password of that type, too, it "blends in" best.
  */
  if (!shadow_pass)
  pwtype = Password_encryption;

@@ -770,8 +770,8 @@ CheckPWChallengeAuth(Port *port, char **logdetail)
  /*
  * If 'md5' authentication is allowed, decide whether to perform 'md5' or
  * 'scram-sha-256' authentication based on the type of password the user
- * has. If it's an MD5 hash, we must do MD5 authentication, and if it's
- * a SCRAM verifier, we must do SCRAM authentication.
+ * has. If it's an MD5 hash, we must do MD5 authentication, and if it's a
+ * SCRAM verifier, we must do SCRAM authentication.
  *
  * If MD5 authentication is not allowed, always use SCRAM. If the user
  * had an MD5 password, CheckSCRAMAuth() will fail.

@@ -122,8 +122,8 @@ encrypt_password(PasswordType target_type, const char *role,
  if (guessed_type != PASSWORD_TYPE_PLAINTEXT)
  {
  /*
- * Cannot convert an already-encrypted password from one
- * format to another, so return it as it is.
+ * Cannot convert an already-encrypted password from one format to
+ * another, so return it as it is.
  */
  return pstrdup(password);
  }

@@ -274,6 +274,7 @@ plain_crypt_verify(const char *role, const char *shadow_pass,
  break;

  case PASSWORD_TYPE_PLAINTEXT:
+
  /*
  * We never store passwords in plaintext, so this shouldn't
  * happen.
@@ -617,7 +617,10 @@ check_db(const char *dbname, const char *role, Oid roleid, List *tokens)
  tok = lfirst(cell);
  if (am_walsender && !am_db_walsender)
  {
- /* physical replication walsender connections can only match replication keyword */
+ /*
+ * physical replication walsender connections can only match
+ * replication keyword
+ */
  if (token_is_keyword(tok, "replication"))
  return true;
  }

@@ -1129,7 +1129,8 @@ exprSetCollation(Node *expr, Oid collation)
  Assert(!OidIsValid(collation)); /* result is always boolean */
  break;
  case T_NextValueExpr:
- Assert(!OidIsValid(collation)); /* result is always an integer type */
+ Assert(!OidIsValid(collation)); /* result is always an integer
+ * type */
  break;
  default:
  elog(ERROR, "unrecognized node type: %d", (int) nodeTag(expr));

@@ -905,8 +905,8 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm)

  /*
  * For every shared iterator, referring to pagetable and iterator array,
- * increase the refcount by 1 so that while freeing the shared iterator
- * we don't free pagetable and iterator array until its refcount becomes 0.
+ * increase the refcount by 1 so that while freeing the shared iterator we
+ * don't free pagetable and iterator array until its refcount becomes 0.
  */
  if (ptbase != NULL)
  pg_atomic_add_fetch_u32(&ptbase->refcount, 1);
@@ -648,6 +648,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
  return;

  case RTE_NAMEDTUPLESTORE:
+
  /*
  * tuplestore cannot be shared, at least without more
  * infrastructure to support that.

@@ -2220,7 +2221,7 @@ generate_gather_paths(PlannerInfo *root, RelOptInfo *rel)
  * For each useful ordering, we can consider an order-preserving Gather
  * Merge.
  */
- foreach (lc, rel->partial_pathlist)
+ foreach(lc, rel->partial_pathlist)
  {
  Path *subpath = (Path *) lfirst(lc);
  GatherMergePath *path;

@@ -664,8 +664,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
  {
  /*
  * For index only scans compute workers based on number of index pages
- * fetched; the number of heap pages we fetch might be so small as
- * to effectively rule out parallelism, which we don't want to do.
+ * fetched; the number of heap pages we fetch might be so small as to
+ * effectively rule out parallelism, which we don't want to do.
  */
  if (indexonly)
  rand_heap_pages = -1;

@@ -1073,8 +1073,8 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
  true);

  /*
- * if, after costing the path, we find that it's not worth
- * using parallel workers, just free it.
+ * if, after costing the path, we find that it's not worth using
+ * parallel workers, just free it.
  */
  if (ipath->path.parallel_workers > 0)
  add_partial_path(rel, (Path *) ipath);
@@ -1102,10 +1102,10 @@ inheritance_planner(PlannerInfo *root)
  /*
  * If the parent RTE is a partitioned table, we should use that as the
  * nominal relation, because the RTEs added for partitioned tables
- * (including the root parent) as child members of the inheritance set
- * do not appear anywhere else in the plan. The situation is exactly
- * the opposite in the case of non-partitioned inheritance parent as
- * described below.
+ * (including the root parent) as child members of the inheritance set do
+ * not appear anywhere else in the plan. The situation is exactly the
+ * opposite in the case of non-partitioned inheritance parent as described
+ * below.
  */
  parent_rte = rt_fetch(parentRTindex, root->parse->rtable);
  if (parent_rte->relkind == RELKIND_PARTITIONED_TABLE)

@@ -1278,9 +1278,9 @@ inheritance_planner(PlannerInfo *root)
  * is used elsewhere in the plan, so using the original parent RTE
  * would give rise to confusing use of multiple aliases in EXPLAIN
  * output for what the user will think is the "same" table. OTOH,
- * it's not a problem in the partitioned inheritance case, because
- * the duplicate child RTE added for the parent does not appear
- * anywhere else in the plan tree.
+ * it's not a problem in the partitioned inheritance case, because the
+ * duplicate child RTE added for the parent does not appear anywhere
+ * else in the plan tree.
  */
  if (nominalRelation < 0)
  nominalRelation = appinfo->child_relid;

@@ -4336,8 +4336,8 @@ consider_groupingsets_paths(PlannerInfo *root,
  /*
  * We treat this as a knapsack problem: the knapsack capacity
  * represents work_mem, the item weights are the estimated memory
- * usage of the hashtables needed to implement a single rollup, and
- * we really ought to use the cost saving as the item value;
+ * usage of the hashtables needed to implement a single rollup,
+ * and we really ought to use the cost saving as the item value;
  * however, currently the costs assigned to sort nodes don't
  * reflect the comparison costs well, and so we treat all items as
  * of equal value (each rollup we hash instead saves us one sort).

@@ -883,8 +883,9 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
  * If the main target relation is a partitioned table, the
  * following list contains the RT indexes of partitioned child
  * relations including the root, which are not included in the
- * above list. We also keep RT indexes of the roots separately
- * to be identitied as such during the executor initialization.
+ * above list. We also keep RT indexes of the roots
+ * separately to be identitied as such during the executor
+ * initialization.
  */
  if (splan->partitioned_rels != NIL)
  {
@@ -1555,9 +1555,10 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
  newrc->waitPolicy = oldrc->waitPolicy;

  /*
- * We mark RowMarks for partitioned child tables as parent RowMarks
- * so that the executor ignores them (except their existence means
- * that the child tables be locked using appropriate mode).
+ * We mark RowMarks for partitioned child tables as parent
+ * RowMarks so that the executor ignores them (except their
+ * existence means that the child tables be locked using
+ * appropriate mode).
  */
  newrc->isParent = (childrte->relkind == RELKIND_PARTITIONED_TABLE);

@@ -1593,8 +1594,8 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
  * parent RT index to the list of RT indexes of its partitioned child
  * tables. When creating an Append or a ModifyTable path for the parent,
  * we copy the child RT index list verbatim to the path so that it could
- * be carried over to the executor so that the latter could identify
- * the partitioned child tables.
+ * be carried over to the executor so that the latter could identify the
+ * partitioned child tables.
  */
  if (partitioned_child_rels != NIL)
  {

@@ -149,9 +149,9 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)

  /*
  * Pass top parent's relids down the inheritance hierarchy. If the parent
- * has top_parent_relids set, it's a direct or an indirect child of the top
- * parent indicated by top_parent_relids. By extension this child is also
- * an indirect child of that parent.
+ * has top_parent_relids set, it's a direct or an indirect child of the
+ * top parent indicated by top_parent_relids. By extension this child is
+ * also an indirect child of that parent.
  */
  if (parent)
  {
@@ -792,9 +792,8 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf)
  makeString(pstrdup(rawc->colname)));

  /*
- * Determine the type and typmod for the new column. FOR
- * ORDINALITY columns are INTEGER per spec; the others are
- * user-specified.
+ * Determine the type and typmod for the new column. FOR ORDINALITY
+ * columns are INTEGER per spec; the others are user-specified.
  */
  if (rawc->for_ordinality)
  {

@@ -1050,7 +1049,6 @@ transformRangeTableSample(ParseState *pstate, RangeTableSample *rts)
  static RangeTblEntry *
  getRTEForSpecialRelationTypes(ParseState *pstate, RangeVar *rv)
  {
-
  CommonTableExpr *cte;
  Index levelsup;
  RangeTblEntry *rte = NULL;

@@ -1164,6 +1164,7 @@ parserOpenTable(ParseState *pstate, const RangeVar *relation, int lockmode)
  */
  if (get_visible_ENR_metadata(pstate->p_queryEnv, relation->relname))
  rel = NULL;
+
  /*
  * An unqualified name might have been meant as a reference to
  * some not-yet-in-scope CTE. The bare "does not exist" message
@@ -378,12 +378,12 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
  * used by pg_dump. Else, generate a name.
  *
  * Although we use ChooseRelationName, it's not guaranteed that the
- * selected sequence name won't conflict; given sufficiently long
- * field names, two different serial columns in the same table could
- * be assigned the same sequence name, and we'd not notice since we
- * aren't creating the sequence quite yet. In practice this seems
- * quite unlikely to be a problem, especially since few people would
- * need two serial columns in one table.
+ * selected sequence name won't conflict; given sufficiently long field
+ * names, two different serial columns in the same table could be assigned
+ * the same sequence name, and we'd not notice since we aren't creating
+ * the sequence quite yet. In practice this seems quite unlikely to be a
+ * problem, especially since few people would need two serial columns in
+ * one table.
  */

  foreach(option, seqoptions)

@@ -403,6 +403,7 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
  if (nameEl)
  {
  RangeVar *rv = makeRangeVarFromNameList(castNode(List, nameEl->arg));
+
  snamespace = rv->schemaname;
  sname = rv->relname;
  seqoptions = list_delete_ptr(seqoptions, nameEl);

@@ -429,14 +430,14 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
  cxt->relation->relname, column->colname)));

  /*
- * Build a CREATE SEQUENCE command to create the sequence object, and
- * add it to the list of things to be done before this CREATE/ALTER
- * TABLE.
+ * Build a CREATE SEQUENCE command to create the sequence object, and add
+ * it to the list of things to be done before this CREATE/ALTER TABLE.
  */
  seqstmt = makeNode(CreateSeqStmt);
  seqstmt->for_identity = for_identity;
  seqstmt->sequence = makeRangeVar(snamespace, sname, -1);
  seqstmt->options = seqoptions;

  /*
  * If a sequence data type was specified, add it to the options. Prepend
  * to the list rather than append; in case a user supplied their own AS

@@ -448,11 +449,11 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
  seqstmt->options);

  /*
- * If this is ALTER ADD COLUMN, make sure the sequence will be owned
- * by the table's owner. The current user might be someone else
- * (perhaps a superuser, or someone who's only a member of the owning
- * role), but the SEQUENCE OWNED BY mechanisms will bleat unless table
- * and sequence have exactly the same owning role.
+ * If this is ALTER ADD COLUMN, make sure the sequence will be owned by
+ * the table's owner. The current user might be someone else (perhaps a
+ * superuser, or someone who's only a member of the owning role), but the
+ * SEQUENCE OWNED BY mechanisms will bleat unless table and sequence have
+ * exactly the same owning role.
  */
  if (cxt->rel)
  seqstmt->ownerId = cxt->rel->rd_rel->relowner;

@@ -462,9 +463,9 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
  cxt->blist = lappend(cxt->blist, seqstmt);

  /*
- * Build an ALTER SEQUENCE ... OWNED BY command to mark the sequence
- * as owned by this column, and add it to the list of things to be
- * done after this CREATE/ALTER TABLE.
+ * Build an ALTER SEQUENCE ... OWNED BY command to mark the sequence as
+ * owned by this column, and add it to the list of things to be done after
+ * this CREATE/ALTER TABLE.
  */
  altseqstmt = makeNode(AlterSeqStmt);
  altseqstmt->sequence = makeRangeVar(snamespace, sname, -1);
@@ -2766,7 +2767,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
  * change the data type of the sequence.
  */
  attnum = get_attnum(relid, cmd->name);
- /* if attribute not found, something will error about it later */
+
+ /*
+ * if attribute not found, something will error about it
+ * later
+ */
  if (attnum != InvalidAttrNumber && get_attidentity(relid, attnum))
  {
  Oid seq_relid = getOwnedSequence(relid, attnum);

@@ -2796,7 +2801,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
  cmd->def = (Node *) newdef;

  attnum = get_attnum(relid, cmd->name);
- /* if attribute not found, something will error about it later */
+
+ /*
+ * if attribute not found, something will error about it
+ * later
+ */
  if (attnum != InvalidAttrNumber)
  generateSerialExtraStmts(&cxt, newdef,
  get_atttype(relid, attnum),

@@ -2854,8 +2863,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
  cxt.alist = lappend(cxt.alist, seqstmt);
  }
  }
- /* If column was not found or was not an identity column, we
- * just let the ALTER TABLE command error out later. */
+
+ /*
+ * If column was not found or was not an identity column,
+ * we just let the ALTER TABLE command error out later.
+ */

  cmd->def = (Node *) newdef;
  newcmds = lappend(newcmds, cmd);
@@ -125,7 +125,7 @@ PosixSemaphoreCreate(void)
  * Attempt to create a new unnamed semaphore.
  */
  static void
- PosixSemaphoreCreate(sem_t * sem)
+ PosixSemaphoreCreate(sem_t *sem)
  {
  if (sem_init(sem, 1, 1) < 0)
  elog(FATAL, "sem_init failed: %m");

@@ -137,7 +137,7 @@ PosixSemaphoreCreate(sem_t * sem)
  * PosixSemaphoreKill - removes a semaphore
  */
  static void
- PosixSemaphoreKill(sem_t * sem)
+ PosixSemaphoreKill(sem_t *sem)
  {
  #ifdef USE_NAMED_POSIX_SEMAPHORES
  /* Got to use sem_close for named semaphores */

@@ -310,8 +310,8 @@ BackgroundWriterMain(void)
  * check whether there has been any WAL inserted since the last time
  * we've logged a running xacts.
  *
- * We do this logging in the bgwriter as it is the only process that is
- * run regularly and returns to its mainloop all the time. E.g.
+ * We do this logging in the bgwriter as it is the only process that
+ * is run regularly and returns to its mainloop all the time. E.g.
  * Checkpointer, when active, is barely ever in its mainloop and thus
  * makes it hard to log regularly.
  */

@@ -350,7 +350,7 @@ BackgroundWriterMain(void)
  */
  rc = WaitLatch(MyLatch,
  WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
- BgWriterDelay /* ms */, WAIT_EVENT_BGWRITER_MAIN);
+ BgWriterDelay /* ms */ , WAIT_EVENT_BGWRITER_MAIN);

  /*
  * If no latch event and BgBufferSync says nothing's happening, extend

@@ -558,7 +558,7 @@ CheckpointerMain(void)

  rc = WaitLatch(MyLatch,
  WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
- cur_timeout * 1000L /* convert to ms */,
+ cur_timeout * 1000L /* convert to ms */ ,
  WAIT_EVENT_CHECKPOINTER_MAIN);

  /*
@@ -182,7 +182,7 @@ static TabStatusArray *pgStatTabList = NULL;
  typedef struct TabStatHashEntry
  {
  Oid t_id;
- PgStat_TableStatus* tsa_entry;
+ PgStat_TableStatus *tsa_entry;
  } TabStatHashEntry;

  /*

@@ -1748,7 +1748,7 @@ pgstat_initstats(Relation rel)
  static PgStat_TableStatus *
  get_tabstat_entry(Oid rel_id, bool isshared)
  {
- TabStatHashEntry* hash_entry;
+ TabStatHashEntry *hash_entry;
  PgStat_TableStatus *entry;
  TabStatusArray *tsa;
  bool found;

@@ -1837,14 +1837,14 @@ get_tabstat_entry(Oid rel_id, bool isshared)
  PgStat_TableStatus *
  find_tabstat_entry(Oid rel_id)
  {
- TabStatHashEntry* hash_entry;
+ TabStatHashEntry *hash_entry;

  /* If hashtable doesn't exist, there are no entries at all */
- if(!pgStatTabHash)
+ if (!pgStatTabHash)
  return NULL;

  hash_entry = hash_search(pgStatTabHash, &rel_id, HASH_FIND, NULL);
- if(!hash_entry)
+ if (!hash_entry)
  return NULL;

  /* Note that this step could also return NULL, but that's correct */

@@ -4061,6 +4061,7 @@ pgstat_get_backend_desc(BackendType backendType)

  return backendDesc;
  }
+
  /* ------------------------------------------------------------
  * Local support functions follow
  * ------------------------------------------------------------

@@ -4405,7 +4406,7 @@ PgstatCollectorMain(int argc, char *argv[])
  wr = WaitLatchOrSocket(MyLatch,
  WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | WL_TIMEOUT,
  pgStatSock,
- 2 * 1000L /* msec */,
+ 2 * 1000L /* msec */ ,
  WAIT_EVENT_PGSTAT_MAIN);
  #endif
@@ -5149,11 +5149,12 @@ RandomCancelKey(int32 *cancel_key)
  #ifdef HAVE_STRONG_RANDOM
  return pg_strong_random((char *) cancel_key, sizeof(int32));
  #else
+
  /*
  * If built with --disable-strong-random, use plain old erand48.
  *
- * We cannot use pg_backend_random() in postmaster, because it stores
- * its state in shared memory.
+ * We cannot use pg_backend_random() in postmaster, because it stores its
+ * state in shared memory.
  */
  static unsigned short seed[3];

@@ -5348,10 +5349,10 @@ StartAutovacuumWorker(void)
  if (canAcceptConnections() == CAC_OK)
  {
  /*
- * Compute the cancel key that will be assigned to this session.
- * We probably don't need cancel keys for autovac workers, but
- * we'd better have something random in the field to prevent
- * unfriendly people from sending cancels to them.
+ * Compute the cancel key that will be assigned to this session. We
+ * probably don't need cancel keys for autovac workers, but we'd
+ * better have something random in the field to prevent unfriendly
+ * people from sending cancels to them.
  */
  if (!RandomCancelKey(&MyCancelKey))
  {

@@ -58,7 +58,7 @@ static bool sendFile(char *readfilename, char *tarfilename,
  static void sendFileWithContent(const char *filename, const char *content);
  static int64 _tarWriteHeader(const char *filename, const char *linktarget,
  struct stat * statbuf, bool sizeonly);
- static int64 _tarWriteDir(const char *pathbuf, int basepathlen, struct stat *statbuf,
+ static int64 _tarWriteDir(const char *pathbuf, int basepathlen, struct stat * statbuf,
  bool sizeonly);
  static void send_int8_string(StringInfoData *buf, int64 intval);
  static void SendBackupHeader(List *tablespaces);
@@ -106,15 +106,15 @@ static const char *excludeDirContents[] =
  {
  /*
  * Skip temporary statistics files. PG_STAT_TMP_DIR must be skipped even
- * when stats_temp_directory is set because PGSS_TEXT_FILE is always created
- * there.
+ * when stats_temp_directory is set because PGSS_TEXT_FILE is always
+ * created there.
  */
  PG_STAT_TMP_DIR,

  /*
- * It is generally not useful to backup the contents of this directory even
- * if the intention is to restore to another master. See backup.sgml for a
- * more detailed description.
+ * It is generally not useful to backup the contents of this directory
+ * even if the intention is to restore to another master. See backup.sgml
+ * for a more detailed description.
  */
  "pg_replslot",

@@ -404,8 +404,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
  qsort(walFiles, nWalFiles, sizeof(char *), compareWalFileNames);

  /*
- * There must be at least one xlog file in the pg_wal directory,
- * since we are doing backup-including-xlog.
+ * There must be at least one xlog file in the pg_wal directory, since
+ * we are doing backup-including-xlog.
  */
  if (nWalFiles < 1)
  ereport(ERROR,

@@ -1314,7 +1314,7 @@ _tarWriteHeader(const char *filename, const char *linktarget,
  * write it as a directory anyway.
  */
  static int64
- _tarWriteDir(const char *pathbuf, int basepathlen, struct stat *statbuf,
+ _tarWriteDir(const char *pathbuf, int basepathlen, struct stat * statbuf,
  bool sizeonly)
  {
  /* If symlink, write it as a directory anyway */

@@ -435,8 +435,8 @@ libpqrcv_endstreaming(WalReceiverConn *conn, TimeLineID *next_tli)
  * next timeline's ID, or just CommandComplete if the server was shut
  * down.
  *
- * If we had not yet received CopyDone from the backend, PGRES_COPY_OUT
- * is also possible in case we aborted the copy in mid-stream.
+ * If we had not yet received CopyDone from the backend, PGRES_COPY_OUT is
+ * also possible in case we aborted the copy in mid-stream.
  */
  res = PQgetResult(conn->streamConn);
  if (PQresultStatus(res) == PGRES_TUPLES_OK)
@@ -545,9 +545,9 @@ libpqrcv_PQexec(PGconn *streamConn, const char *query)

  /*
  * PQexec() silently discards any prior query results on the connection.
- * This is not required for this function as it's expected that the
- * caller (which is this library in all cases) will behave correctly and
- * we don't have to be backwards compatible with old libpq.
+ * This is not required for this function as it's expected that the caller
+ * (which is this library in all cases) will behave correctly and we don't
+ * have to be backwards compatible with old libpq.
  */

  /*

@@ -941,7 +941,7 @@ stringlist_to_identifierstr(PGconn *conn, List *strings)

  initStringInfo(&res);

- foreach (lc, strings)
+ foreach(lc, strings)
  {
  char *val = strVal(lfirst(lc));
  char *val_escaped;

@@ -233,6 +233,7 @@ logicalrep_worker_find(Oid subid, Oid relid, bool only_running)
  for (i = 0; i < max_logical_replication_workers; i++)
  {
  LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+
  if (w->in_use && w->subid == subid && w->relid == relid &&
  (!only_running || w->proc))
  {

@@ -660,6 +661,7 @@ logicalrep_sync_worker_count(Oid subid)
  for (i = 0; i < max_logical_replication_workers; i++)
  {
  LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+
  if (w->subid == subid && OidIsValid(w->relid))
  res++;
  }

@@ -864,9 +866,9 @@ ApplyLauncherMain(Datum main_arg)
  {
  /*
  * The wait in previous cycle was interrupted in less than
- * wal_retrieve_retry_interval since last worker was started,
- * this usually means crash of the worker, so we should retry
- * in wal_retrieve_retry_interval again.
+ * wal_retrieve_retry_interval since last worker was started, this
+ * usually means crash of the worker, so we should retry in
+ * wal_retrieve_retry_interval again.
  */
  wait_time = wal_retrieve_retry_interval;
  }
@@ -992,7 +994,10 @@ pg_stat_get_subscription(PG_FUNCTION_ARGS)

  tuplestore_putvalues(tupstore, tupdesc, values, nulls);

- /* If only a single subscription was requested, and we found it, break. */
+ /*
+ * If only a single subscription was requested, and we found it,
+ * break.
+ */
  if (OidIsValid(subid))
  break;
  }

@@ -328,17 +328,19 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin
  if (ctx->reader->EndRecPtr != InvalidXLogRecPtr && confirm)
  {
  LogicalConfirmReceivedLocation(ctx->reader->EndRecPtr);
+
  /*
  * If only the confirmed_flush_lsn has changed the slot won't get
- * marked as dirty by the above. Callers on the walsender interface
- * are expected to keep track of their own progress and don't need
- * it written out. But SQL-interface users cannot specify their own
- * start positions and it's harder for them to keep track of their
- * progress, so we should make more of an effort to save it for them.
+ * marked as dirty by the above. Callers on the walsender
+ * interface are expected to keep track of their own progress and
+ * don't need it written out. But SQL-interface users cannot
+ * specify their own start positions and it's harder for them to
+ * keep track of their progress, so we should make more of an
+ * effort to save it for them.
  *
- * Dirty the slot so it's written out at the next checkpoint. We'll
- * still lose its position on crash, as documented, but it's better
- * than always losing the position even on clean restart.
+ * Dirty the slot so it's written out at the next checkpoint.
+ * We'll still lose its position on crash, as documented, but it's
+ * better than always losing the position even on clean restart.
  */
  ReplicationSlotMarkDirty();
  }
@@ -110,7 +110,7 @@ logicalrep_relmap_init(void)

  /* This will usually be small. */
  LogicalRepTypMap = hash_create("logicalrep type map cache", 2, &ctl,
- HASH_ELEM | HASH_BLOBS |HASH_CONTEXT);
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

  /* Watch for invalidation events. */
  CacheRegisterRelcacheCallback(logicalrep_relmap_invalidate_cb,

@@ -246,6 +246,7 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
  TupleDesc desc;
  LogicalRepRelation *remoterel;
  MemoryContext oldctx;
+
  remoterel = &entry->remoterel;

  /* Try to find and lock the relation by name. */

@@ -265,8 +266,8 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)

  /*
  * Build the mapping of local attribute numbers to remote attribute
- * numbers and validate that we don't miss any replicated columns
- * as that would result in potentially unwanted data loss.
+ * numbers and validate that we don't miss any replicated columns as
+ * that would result in potentially unwanted data loss.
  */
  desc = RelationGetDescr(entry->localrel);
  oldctx = MemoryContextSwitchTo(LogicalRepRelMapContext);

@@ -278,6 +279,7 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
  {
  int attnum = logicalrep_rel_att_by_name(remoterel,
  NameStr(desc->attrs[i]->attname));
+
  entry->attrmap[i] = attnum;
  if (attnum >= 0)
  found++;

@@ -299,8 +301,8 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
  * but in the opposite scenario it will.
  *
  * Don't throw any error here just mark the relation entry as not
- * updatable, as replica identity is only for updates and deletes
- * but inserts can be replicated even without it.
+ * updatable, as replica identity is only for updates and deletes but
+ * inserts can be replicated even without it.
  */
  entry->updatable = true;
  idkey = RelationGetIndexAttrBitmap(entry->localrel,

@@ -310,6 +312,7 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
  {
  idkey = RelationGetIndexAttrBitmap(entry->localrel,
  INDEX_ATTR_BITMAP_PRIMARY_KEY);
+
  /*
  * If no replica identity index and no PK, the published table
  * must have replica identity FULL.

@@ -986,6 +986,7 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid,
if (NormalTransactionIdFollows(subxid, xmax))
xmax = subxid;
}

/*
* If we're forcing timetravel we also need visibility information
* about subtransaction, so keep track of subtransaction's state, even

@@ -1031,8 +1032,8 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid,

/*
* Adjust xmax of the snapshot builder, we only do that for committed,
* catalog modifying, transactions, everything else isn't interesting
* for us since we'll never look at the respective rows.
* catalog modifying, transactions, everything else isn't interesting for
* us since we'll never look at the respective rows.
*/
if (needs_timetravel &&
(!TransactionIdIsValid(builder->xmax) ||

@@ -1130,8 +1131,8 @@ SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn, xl_running_xact
running->oldestRunningXid);

/*
* Increase shared memory limits, so vacuum can work on tuples we prevented
* from being pruned till now.
* Increase shared memory limits, so vacuum can work on tuples we
* prevented from being pruned till now.
*/
LogicalIncreaseXminForSlot(lsn, running->oldestRunningXid);


@@ -1271,6 +1272,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
/* there won't be any state to cleanup */
return false;
}

/*
* c) transition from START to BUILDING_SNAPSHOT.
*

@@ -1308,6 +1310,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn

SnapBuildWaitSnapshot(running, running->nextXid);
}

/*
* c) transition from BUILDING_SNAPSHOT to FULL_SNAPSHOT.
*

@@ -1331,6 +1334,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn

SnapBuildWaitSnapshot(running, running->nextXid);
}

/*
* c) transition from FULL_SNAPSHOT to CONSISTENT.
*

@@ -1383,9 +1387,9 @@ SnapBuildWaitSnapshot(xl_running_xacts *running, TransactionId cutoff)
TransactionId xid = running->xids[off];

/*
* Upper layers should prevent that we ever need to wait on
* ourselves. Check anyway, since failing to do so would either
* result in an endless wait or an Assert() failure.
* Upper layers should prevent that we ever need to wait on ourselves.
* Check anyway, since failing to do so would either result in an
* endless wait or an Assert() failure.
*/
if (TransactionIdIsCurrentTransactionId(xid))
elog(ERROR, "waiting for ourselves");

@@ -1864,8 +1868,9 @@ CheckPointSnapBuild(void)
char path[MAXPGPATH + 21];

/*
* We start off with a minimum of the last redo pointer. No new replication
* slot will start before that, so that's a safe upper bound for removal.
* We start off with a minimum of the last redo pointer. No new
* replication slot will start before that, so that's a safe upper bound
* for removal.
*/
redo = GetRedoRecPtr();


@@ -113,7 +113,8 @@ StringInfo copybuf = NULL;
/*
* Exit routine for synchronization worker.
*/
static void pg_attribute_noreturn()
static void
pg_attribute_noreturn()
finish_sync_worker(void)
{
/*

@@ -324,6 +325,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
last_start_times = hash_create("Logical replication table sync worker start times",
256, &ctl, HASH_ELEM | HASH_BLOBS);
}

/*
* Clean up the hash table when we're done with all tables (just to
* release the bit of memory).

@@ -337,14 +339,14 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
/* Process all tables that are being synchronized. */
foreach(lc, table_states)
{
SubscriptionRelState *rstate = (SubscriptionRelState *)lfirst(lc);
SubscriptionRelState *rstate = (SubscriptionRelState *) lfirst(lc);

if (rstate->state == SUBREL_STATE_SYNCDONE)
{
/*
* Apply has caught up to the position where the table sync
* has finished. Time to mark the table as ready so that
* apply will just continue to replicate it normally.
* Apply has caught up to the position where the table sync has
* finished. Time to mark the table as ready so that apply will
* just continue to replicate it normally.
*/
if (current_lsn >= rstate->lsn)
{

@@ -376,6 +378,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
SpinLockRelease(&syncworker->relmutex);
}
else

/*
* If no sync worker for this table yet, count running sync
* workers for this subscription, while we have the lock, for

@@ -398,12 +401,12 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
*
* b) Apply is behind the table sync: We tell the table sync
* to mark the table as SYNCDONE and finish.
*
* c) Apply and table sync are at the same position: We tell
* table sync to mark the table as READY and finish.
*
* In any case we'll need to wait for table sync to change
* the state in catalog and only then continue ourselves.
* In any case we'll need to wait for table sync to change the
* state in catalog and only then continue ourselves.
*/
if (current_lsn > rstate->lsn)
{

@@ -427,16 +430,15 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
logicalrep_worker_wakeup_ptr(syncworker);

/*
* Enter busy loop and wait for synchronization status
* change.
* Enter busy loop and wait for synchronization status change.
*/
wait_for_sync_status_change(rstate->relid, rstate->state);
}

/*
* If there is no sync worker registered for the table and
* there is some free sync worker slot, start new sync worker
* for the table.
* If there is no sync worker registered for the table and there
* is some free sync worker slot, start new sync worker for the
* table.
*/
else if (!syncworker && nsyncworkers < max_sync_workers_per_subscription)
{

@@ -818,24 +820,23 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
pgstat_report_stat(false);

/*
* We want to do the table data sync in single
* transaction.
* We want to do the table data sync in single transaction.
*/
StartTransactionCommand();

/*
* Use standard write lock here. It might be better to
* disallow access to table while it's being synchronized.
* But we don't want to block the main apply process from
* working and it has to open relation in RowExclusiveLock
* when remapping remote relation id to local one.
* disallow access to table while it's being synchronized. But
* we don't want to block the main apply process from working
* and it has to open relation in RowExclusiveLock when
* remapping remote relation id to local one.
*/
rel = heap_open(MyLogicalRepWorker->relid, RowExclusiveLock);

/*
* Create temporary slot for the sync process.
* We do this inside transaction so that we can use the
* snapshot made by the slot to get existing data.
* Create temporary slot for the sync process. We do this
* inside transaction so that we can use the snapshot made by
* the slot to get existing data.
*/
res = walrcv_exec(wrconn,
"BEGIN READ ONLY ISOLATION LEVEL "

@@ -849,10 +850,10 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
/*
* Create new temporary logical decoding slot.
*
* We'll use slot for data copy so make sure the snapshot
* is used for the transaction, that way the COPY will get
* data that is consistent with the lsn used by the slot
* to start decoding.
* We'll use slot for data copy so make sure the snapshot is
* used for the transaction, that way the COPY will get data
* that is consistent with the lsn used by the slot to start
* decoding.
*/
walrcv_create_slot(wrconn, slotname, true,
CRS_USE_SNAPSHOT, origin_startpos);

@@ -872,8 +873,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
CommandCounterIncrement();

/*
* We are done with the initial data synchronization,
* update the state.
* We are done with the initial data synchronization, update
* the state.
*/
SpinLockAcquire(&MyLogicalRepWorker->relmutex);
MyLogicalRepWorker->relstate = SUBREL_STATE_SYNCWAIT;

@@ -881,8 +882,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
SpinLockRelease(&MyLogicalRepWorker->relmutex);

/*
* Wait for main apply worker to either tell us to
* catchup or that we are done.
* Wait for main apply worker to either tell us to catchup or
* that we are done.
*/
wait_for_sync_status_change(MyLogicalRepWorker->relid,
MyLogicalRepWorker->relstate);

@@ -476,8 +476,8 @@ static void
apply_handle_origin(StringInfo s)
{
/*
* ORIGIN message can only come inside remote transaction and before
* any actual writes.
* ORIGIN message can only come inside remote transaction and before any
* actual writes.
*/
if (!in_remote_transaction ||
(IsTransactionState() && !am_tablesync_worker()))

@@ -607,8 +607,8 @@ check_relation_updatable(LogicalRepRelMapEntry *rel)
return;

/*
* We are in error mode so it's fine this is somewhat slow.
* It's better to give user correct error.
* We are in error mode so it's fine this is somewhat slow. It's better to
* give user correct error.
*/
if (OidIsValid(GetRelationIdentityOrPK(rel->localrel)))
{

@@ -685,8 +685,8 @@ apply_handle_update(StringInfo s)
MemoryContextSwitchTo(oldctx);

/*
* Try to find tuple using either replica identity index, primary key
* or if needed, sequential scan.
* Try to find tuple using either replica identity index, primary key or
* if needed, sequential scan.
*/
idxoid = GetRelationIdentityOrPK(rel->localrel);
Assert(OidIsValid(idxoid) ||

@@ -802,8 +802,8 @@ apply_handle_delete(StringInfo s)
MemoryContextSwitchTo(oldctx);

/*
* Try to find tuple using either replica identity index, primary key
* or if needed, sequential scan.
* Try to find tuple using either replica identity index, primary key or
* if needed, sequential scan.
*/
idxoid = GetRelationIdentityOrPK(rel->localrel);
Assert(OidIsValid(idxoid) ||

@@ -826,7 +826,7 @@ apply_handle_delete(StringInfo s)
}
else
{
/* The tuple to be deleted could not be found.*/
/* The tuple to be deleted could not be found. */
ereport(DEBUG1,
(errmsg("logical replication could not find row for delete "
"in replication target %s",

@@ -995,8 +995,8 @@ static void
LogicalRepApplyLoop(XLogRecPtr last_received)
{
/*
* Init the ApplyMessageContext which we clean up after each
* replication protocol message.
* Init the ApplyMessageContext which we clean up after each replication
* protocol message.
*/
ApplyMessageContext = AllocSetContextCreate(ApplyContext,
"ApplyMessageContext",

@@ -1108,7 +1108,8 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
{
/*
* If we didn't get any transactions for a while there might be
* unconsumed invalidation messages in the queue, consume them now.
* unconsumed invalidation messages in the queue, consume them
* now.
*/
AcceptInvalidationMessages();
if (!MySubscriptionValid)

@@ -1126,6 +1127,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
if (endofstream)
{
TimeLineID tli;

walrcv_endstreaming(wrconn, &tli);
break;
}

@@ -1152,19 +1154,18 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
if (rc & WL_TIMEOUT)
{
/*
* We didn't receive anything new. If we haven't heard
* anything from the server for more than
* wal_receiver_timeout / 2, ping the server. Also, if
* it's been longer than wal_receiver_status_interval
* since the last update we sent, send a status update to
* the master anyway, to report any progress in applying
* WAL.
* We didn't receive anything new. If we haven't heard anything
* from the server for more than wal_receiver_timeout / 2, ping
* the server. Also, if it's been longer than
* wal_receiver_status_interval since the last update we sent,
* send a status update to the master anyway, to report any
* progress in applying WAL.
*/
bool requestReply = false;

/*
* Check if time since last receive from standby has
* reached the configured limit.
* Check if time since last receive from standby has reached the
* configured limit.
*/
if (wal_receiver_timeout > 0)
{

@@ -1180,8 +1181,8 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
(errmsg("terminating logical replication worker due to timeout")));

/*
* We didn't receive anything new, for half of
* receiver replication timeout. Ping the server.
* We didn't receive anything new, for half of receiver
* replication timeout. Ping the server.
*/
if (!ping_sent)
{

@@ -1237,8 +1238,8 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
get_flush_position(&writepos, &flushpos, &have_pending_txes);

/*
* No outstanding transactions to flush, we can report the latest
* received position. This is important for synchronous replication.
* No outstanding transactions to flush, we can report the latest received
* position. This is important for synchronous replication.
*/
if (!have_pending_txes)
flushpos = writepos = recvpos;

@@ -1263,6 +1264,7 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
if (!reply_message)
{
MemoryContext oldctx = MemoryContextSwitchTo(ApplyContext);

reply_message = makeStringInfo();
MemoryContextSwitchTo(oldctx);
}

@@ -1317,9 +1319,8 @@ reread_subscription(void)
newsub = GetSubscription(MyLogicalRepWorker->subid, true);

/*
* Exit if the subscription was removed.
* This normally should not happen as the worker gets killed
* during DROP SUBSCRIPTION.
* Exit if the subscription was removed. This normally should not happen
* as the worker gets killed during DROP SUBSCRIPTION.
*/
if (!newsub)
{

@@ -1333,9 +1334,8 @@ reread_subscription(void)
}

/*
* Exit if the subscription was disabled.
* This normally should not happen as the worker gets killed
* during ALTER SUBSCRIPTION ... DISABLE.
* Exit if the subscription was disabled. This normally should not happen
* as the worker gets killed during ALTER SUBSCRIPTION ... DISABLE.
*/
if (!newsub->enabled)
{

@@ -1349,8 +1349,8 @@ reread_subscription(void)
}

/*
* Exit if connection string was changed. The launcher will start
* new worker.
* Exit if connection string was changed. The launcher will start new
* worker.
*/
if (strcmp(newsub->conninfo, MySubscription->conninfo) != 0)
{

@@ -1382,8 +1382,8 @@ reread_subscription(void)
Assert(newsub->slotname);

/*
* We need to make new connection to new slot if slot name has changed
* so exit here as well if that's the case.
* We need to make new connection to new slot if slot name has changed so
* exit here as well if that's the case.
*/
if (strcmp(newsub->slotname, MySubscription->slotname) != 0)
{

@@ -1397,8 +1397,8 @@ reread_subscription(void)
}

/*
* Exit if publication list was changed. The launcher will start
* new worker.
* Exit if publication list was changed. The launcher will start new
* worker.
*/
if (!equal(newsub->publications, MySubscription->publications))
{

@@ -1570,9 +1570,8 @@ ApplyWorkerMain(Datum main_arg)
(errmsg("could not connect to the publisher: %s", err)));

/*
* We don't really use the output identify_system for anything
* but it does some initializations on the upstream so let's still
* call it.
* We don't really use the output identify_system for anything but it
* does some initializations on the upstream so let's still call it.
*/
(void) walrcv_identify_system(wrconn, &startpointTLI,
&server_version);

@@ -1580,8 +1579,8 @@ ApplyWorkerMain(Datum main_arg)
}

/*
* Setup callback for syscache so that we know when something
* changes in the subscription relation state.
* Setup callback for syscache so that we know when something changes in
* the subscription relation state.
*/
CacheRegisterSyscacheCallback(SUBSCRIPTIONRELMAP,
invalidate_syncing_table_states,

@@ -29,9 +29,9 @@ PG_MODULE_MAGIC;

extern void _PG_output_plugin_init(OutputPluginCallbacks *cb);

static void pgoutput_startup(LogicalDecodingContext * ctx,
static void pgoutput_startup(LogicalDecodingContext *ctx,
OutputPluginOptions *opt, bool is_init);
static void pgoutput_shutdown(LogicalDecodingContext * ctx);
static void pgoutput_shutdown(LogicalDecodingContext *ctx);
static void pgoutput_begin_txn(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn);
static void pgoutput_commit_txn(LogicalDecodingContext *ctx,

@@ -143,7 +143,7 @@ parse_output_parameters(List *options, uint32 *protocol_version,
* Initialize this plugin
*/
static void
pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt,
pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
bool is_init)
{
PGOutputData *data = palloc0(sizeof(PGOutputData));

@@ -379,7 +379,7 @@ pgoutput_origin_filter(LogicalDecodingContext *ctx,
* of the ctx->context so it will be cleaned up by logical decoding machinery.
*/
static void
pgoutput_shutdown(LogicalDecodingContext * ctx)
pgoutput_shutdown(LogicalDecodingContext *ctx)
{
if (RelationSyncCache)
{

@@ -397,7 +397,7 @@ LoadPublications(List *pubnames)
List *result = NIL;
ListCell *lc;

foreach (lc, pubnames)
foreach(lc, pubnames)
{
char *pubname = (char *) lfirst(lc);
Publication *pub = GetPublicationByName(pubname, false);
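
The foreach() spacing normalized above is PostgreSQL's list-iteration macro. A hedged, self-contained sketch of the idiom (list contents invented for illustration):

	List	   *names = NIL;
	ListCell   *lc;

	/* lappend() grows the list; lfirst() fetches a cell's payload */
	names = lappend(names, pstrdup("alpha"));
	names = lappend(names, pstrdup("beta"));

	foreach(lc, names)
	{
		char	   *name = (char *) lfirst(lc);

		elog(DEBUG1, "publication name: %s", name);
	}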

@@ -417,9 +417,8 @@ publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue)
publications_valid = false;

/*
* Also invalidate per-relation cache so that next time the filtering
* info is checked it will be updated with the new publication
* settings.
* Also invalidate per-relation cache so that next time the filtering info
* is checked it will be updated with the new publication settings.
*/
rel_sync_cache_publication_cb(arg, cacheid, hashvalue);
}

@@ -499,9 +498,9 @@ get_rel_sync_entry(PGOutputData *data, Oid relid)
}

/*
* Build publication cache. We can't use one provided by relcache
* as relcache considers all publications given relation is in, but
* here we only need to consider ones that the subscriber requested.
* Build publication cache. We can't use one provided by relcache as
* relcache considers all publications given relation is in, but here
* we only need to consider ones that the subscriber requested.
*/
entry->pubactions.pubinsert = entry->pubactions.pubupdate =
entry->pubactions.pubdelete = false;

@@ -558,15 +557,14 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
* safe point.
*
* Getting invalidations for relations that aren't in the table is
* entirely normal, since there's no way to unregister for an
* invalidation event. So we don't care if it's found or not.
* entirely normal, since there's no way to unregister for an invalidation
* event. So we don't care if it's found or not.
*/
entry = (RelationSyncEntry *) hash_search(RelationSyncCache, &relid,
HASH_FIND, NULL);

/*
* Reset schema sent status as the relation definition may have
* changed.
* Reset schema sent status as the relation definition may have changed.
*/
if (entry != NULL)
entry->schema_sent = false;

@@ -590,8 +588,8 @@ rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
return;

/*
* There is no way to find which entry in our cache the hash belongs to
* so mark the whole cache as invalid.
* There is no way to find which entry in our cache the hash belongs to so
* mark the whole cache as invalid.
*/
hash_seq_init(&status, RelationSyncCache);
while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL)

@@ -502,8 +502,8 @@ ReplicationSlotDropPtr(ReplicationSlot *slot)
/*
* Rename the slot directory on disk, so that we'll no longer recognize
* this as a valid slot. Note that if this fails, we've got to mark the
* slot inactive before bailing out. If we're dropping an ephemeral or
* a temporary slot, we better never fail hard as the caller won't expect
* slot inactive before bailing out. If we're dropping an ephemeral or a
* temporary slot, we better never fail hard as the caller won't expect
* the slot to survive and this might get called during error handling.
*/
if (rename(path, tmppath) == 0)

@@ -119,11 +119,11 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS)

/*
* Acquire a logical decoding slot, this will check for conflicting names.
* Initially create persistent slot as ephemeral - that allows us to nicely
* handle errors during initialization because it'll get dropped if this
* transaction fails. We'll make it persistent at the end.
* Temporary slots can be created as temporary from beginning as they get
* dropped on error as well.
* Initially create persistent slot as ephemeral - that allows us to
* nicely handle errors during initialization because it'll get dropped if
* this transaction fails. We'll make it persistent at the end. Temporary
* slots can be created as temporary from beginning as they get dropped on
* error as well.
*/
ReplicationSlotCreate(NameStr(*name), true,
temporary ? RS_TEMPORARY : RS_EPHEMERAL);

@@ -542,9 +542,9 @@ SyncRepGetSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
* oldest ones among sync standbys. In a quorum-based, they are the Nth
* latest ones.
*
* SyncRepGetNthLatestSyncRecPtr() also can calculate the oldest positions.
* But we use SyncRepGetOldestSyncRecPtr() for that calculation because
* it's a bit more efficient.
* SyncRepGetNthLatestSyncRecPtr() also can calculate the oldest
* positions. But we use SyncRepGetOldestSyncRecPtr() for that calculation
* because it's a bit more efficient.
*
* XXX If the numbers of current and requested sync standbys are the same,
* we can use SyncRepGetOldestSyncRecPtr() to calculate the synced

@@ -575,10 +575,10 @@ SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
ListCell *cell;

/*
* Scan through all sync standbys and calculate the oldest
* Write, Flush and Apply positions.
* Scan through all sync standbys and calculate the oldest Write, Flush
* and Apply positions.
*/
foreach (cell, sync_standbys)
foreach(cell, sync_standbys)
{
WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
XLogRecPtr write;

@@ -620,7 +620,7 @@ SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
flush_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * len);
apply_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * len);

foreach (cell, sync_standbys)
foreach(cell, sync_standbys)
{
WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];


@@ -730,8 +730,8 @@ SyncRepGetSyncStandbysQuorum(bool *am_sync)
continue;

/*
* Consider this standby as a candidate for quorum sync standbys
* and append it to the result.
* Consider this standby as a candidate for quorum sync standbys and
* append it to the result.
*/
result = lappend_int(result, i);
if (am_sync != NULL && walsnd == MyWalSnd)

@@ -955,8 +955,8 @@ SyncRepGetStandbyPriority(void)
return 0;

/*
* In quorum-based sync replication, all the standbys in the list
* have the same priority, one.
* In quorum-based sync replication, all the standbys in the list have the
* same priority, one.
*/
return (SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY) ? priority : 1;
}

@@ -1176,9 +1176,12 @@ XLogWalRcvSendHSFeedback(bool immed)
{
TimestampTz now;
TransactionId nextXid;
uint32 xmin_epoch, catalog_xmin_epoch;
TransactionId xmin, catalog_xmin;
uint32 xmin_epoch,
catalog_xmin_epoch;
TransactionId xmin,
catalog_xmin;
static TimestampTz sendTime = 0;

/* initially true so we always send at least one feedback message */
static bool master_has_standby_xmin = true;


@@ -1211,8 +1214,8 @@ XLogWalRcvSendHSFeedback(bool immed)
*
* Bailing out here also ensures that we don't send feedback until we've
* read our own replication slot state, so we don't tell the master to
* discard needed xmin or catalog_xmin from any slots that may exist
* on this replica.
* discard needed xmin or catalog_xmin from any slots that may exist on
* this replica.
*/
if (!HotStandbyActive())
return;

@@ -1232,7 +1235,7 @@ XLogWalRcvSendHSFeedback(bool immed)
* excludes the catalog_xmin.
*/
xmin = GetOldestXmin(NULL,
PROCARRAY_FLAGS_DEFAULT|PROCARRAY_SLOTS_XMIN);
PROCARRAY_FLAGS_DEFAULT | PROCARRAY_SLOTS_XMIN);

ProcArrayGetReplicationSlotXmin(&slot_xmin, &catalog_xmin);


@@ -1253,9 +1256,9 @@ XLogWalRcvSendHSFeedback(bool immed)
GetNextXidAndEpoch(&nextXid, &xmin_epoch);
catalog_xmin_epoch = xmin_epoch;
if (nextXid < xmin)
xmin_epoch --;
xmin_epoch--;
if (nextXid < catalog_xmin)
catalog_xmin_epoch --;
catalog_xmin_epoch--;

elog(DEBUG2, "sending hot standby feedback xmin %u epoch %u catalog_xmin %u catalog_xmin_epoch %u",
xmin, xmin_epoch, catalog_xmin, catalog_xmin_epoch);
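
The epoch decrements reformatted in the hunk above follow from 32-bit transaction-ID wraparound: if the xmin being reported compares greater than the next XID to be assigned, it must have been assigned before the last wrap, so it belongs to the previous epoch. A standalone sketch of that reasoning (types and names simplified; not part of the commit):

	#include <stdint.h>

	/*
	 * Pair a 32-bit xid with an epoch to form an effectively 64-bit
	 * counter.  If the xid is "ahead of" next_xid, it predates the most
	 * recent wraparound, so the epoch paired with it must be one less
	 * than the current one.
	 */
	static uint64_t
	full_xid(uint32_t xid, uint32_t next_xid, uint32_t next_epoch)
	{
		uint32_t	epoch = next_epoch;

		if (next_xid < xid)		/* assigned before the wrap */
			epoch--;

		return ((uint64_t) epoch << 32) | xid;
	}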

@@ -580,8 +580,8 @@ StartReplication(StartReplicationCmd *cmd)
sendTimeLineIsHistoric = true;

/*
* Check that the timeline the client requested exists, and
* the requested start location is on that timeline.
* Check that the timeline the client requested exists, and the
* requested start location is on that timeline.
*/
timeLineHistory = readTimeLineHistory(ThisTimeLineID);
switchpoint = tliSwitchPoint(cmd->timeline, timeLineHistory,

@@ -599,8 +599,8 @@ StartReplication(StartReplicationCmd *cmd)
* request to start replication from the beginning of the WAL
* segment that contains switchpoint, but on the new timeline, so
* that it doesn't end up with a partial segment. If you ask for
* too old a starting point, you'll get an error later when we fail
* to find the requested WAL segment in pg_wal.
* too old a starting point, you'll get an error later when we
* fail to find the requested WAL segment in pg_wal.
*
* XXX: we could be more strict here and only allow a startpoint
* that's older than the switchpoint, if it's still in the same

@@ -717,9 +717,9 @@ StartReplication(StartReplicationCmd *cmd)
MemSet(nulls, false, sizeof(nulls));

/*
* Need a tuple descriptor representing two columns.
* int8 may seem like a surprising data type for this, but in theory
* int4 would not be wide enough for this, as TimeLineID is unsigned.
* Need a tuple descriptor representing two columns. int8 may seem
* like a surprising data type for this, but in theory int4 would not
* be wide enough for this, as TimeLineID is unsigned.
*/
tupdesc = CreateTemplateTupleDesc(2, false);
TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "next_tli",

@@ -795,7 +795,7 @@ parseCreateReplSlotOptions(CreateReplicationSlotCmd *cmd,
bool reserve_wal_given = false;

/* Parse options */
foreach (lc, cmd->options)
foreach(lc, cmd->options)
{
DefElem *defel = (DefElem *) lfirst(lc);

|
|||
TimestampTz now = GetCurrentTimestamp();
|
||||
|
||||
/*
|
||||
* Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS
|
||||
* to avoid flooding the lag tracker when we commit frequently.
|
||||
* Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS to
|
||||
* avoid flooding the lag tracker when we commit frequently.
|
||||
*/
|
||||
#define WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS 1000
|
||||
if (!TimestampDifferenceExceeds(sendTime, now,
|
||||
|
|

@@ -1474,8 +1474,8 @@ exec_replication_command(const char *cmd_string)
SnapBuildClearExportedSnapshot();

/*
* For aborted transactions, don't allow anything except pure SQL,
* the exec_simple_query() will handle it correctly.
* For aborted transactions, don't allow anything except pure SQL, the
* exec_simple_query() will handle it correctly.
*/
if (IsAbortedTransactionBlockState() && !IsA(cmd_node, SQLCmd))
ereport(ERROR,

@@ -1974,8 +1974,8 @@ ProcessStandbyHSFeedbackMessage(void)
*
* If we're using a replication slot we reserve the xmin via that,
* otherwise via the walsender's PGXACT entry. We can only track the
* catalog xmin separately when using a slot, so we store the least
* of the two provided when not using a slot.
* catalog xmin separately when using a slot, so we store the least of the
* two provided when not using a slot.
*
* XXX: It might make sense to generalize the ephemeral slot concept and
* always use the slot mechanism to handle the feedback xmin.

@@ -2155,8 +2155,8 @@ WalSndLoop(WalSndSendDataCallback send_data)
}

/*
* At the reception of SIGUSR2, switch the WAL sender to the stopping
* state.
* At the reception of SIGUSR2, switch the WAL sender to the
* stopping state.
*/
if (got_SIGUSR2)
WalSndSetState(WALSNDSTATE_STOPPING);

@@ -2588,18 +2588,18 @@ XLogSendPhysical(void)
* it seems good enough to capture the time here. We should reach this
* after XLogFlush() runs WalSndWakeupProcessRequests(), and although that
* may take some time, we read the WAL flush pointer and take the time
* very close to together here so that we'll get a later position if it
* is still moving.
* very close to together here so that we'll get a later position if it is
* still moving.
*
* Because LagTrackerWriter ignores samples when the LSN hasn't advanced,
* this gives us a cheap approximation for the WAL flush time for this
* LSN.
*
* Note that the LSN is not necessarily the LSN for the data contained in
* the present message; it's the end of the WAL, which might be
* further ahead. All the lag tracking machinery cares about is finding
* out when that arbitrary LSN is eventually reported as written, flushed
* and applied, so that it can measure the elapsed time.
* the present message; it's the end of the WAL, which might be further
* ahead. All the lag tracking machinery cares about is finding out when
* that arbitrary LSN is eventually reported as written, flushed and
* applied, so that it can measure the elapsed time.
*/
LagTrackerWrite(SendRqstPtr, GetCurrentTimestamp());


@@ -2758,8 +2758,8 @@ XLogSendLogical(void)
if (record != NULL)
{
/*
* Note the lack of any call to LagTrackerWrite() which is handled
* by WalSndUpdateProgress which is called by output plugin through
* Note the lack of any call to LagTrackerWrite() which is handled by
* WalSndUpdateProgress which is called by output plugin through
* logical decoding write api.
*/
LogicalDecodingProcessRecord(logical_decoding_ctx, logical_decoding_ctx->reader);

@@ -2805,9 +2805,8 @@ WalSndDone(WalSndSendDataCallback send_data)

/*
* To figure out whether all WAL has successfully been replicated, check
* flush location if valid, write otherwise. Tools like pg_receivewal
* will usually (unless in synchronous mode) return an invalid flush
* location.
* flush location if valid, write otherwise. Tools like pg_receivewal will
* usually (unless in synchronous mode) return an invalid flush location.
*/
replicatedPtr = XLogRecPtrIsInvalid(MyWalSnd->flush) ?
MyWalSnd->write : MyWalSnd->flush;

@@ -3448,11 +3447,11 @@ LagTrackerRead(int head, XLogRecPtr lsn, TimestampTz now)
/*
* We didn't cross a time. If there is a future sample that we
* haven't reached yet, and we've already reached at least one sample,
* let's interpolate the local flushed time. This is mainly useful for
* reporting a completely stuck apply position as having increasing
* lag, since otherwise we'd have to wait for it to eventually start
* moving again and cross one of our samples before we can show the
* lag increasing.
* let's interpolate the local flushed time. This is mainly useful
* for reporting a completely stuck apply position as having
* increasing lag, since otherwise we'd have to wait for it to
* eventually start moving again and cross one of our samples before
* we can show the lag increasing.
*/
if (LagTracker.read_heads[head] != LagTracker.write_head &&
LagTracker.last_read[head].time != 0)

@@ -409,7 +409,7 @@ statext_dependencies_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
continue;

d = (MVDependency *) palloc0(offsetof(MVDependency, attributes)
+ k * sizeof(AttrNumber));
+k * sizeof(AttrNumber));

/* copy the dependency (and keep the indexes into stxkeys) */
d->degree = degree;
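
The palloc0() call re-spaced above uses the standard C idiom for allocating a struct that ends in a variable-length array: offsetof() gives the size of the fixed prefix, and k element sizes are added for the tail. A hedged sketch with invented names (plain C, not the commit's code):

	#include <stddef.h>
	#include <stdlib.h>

	typedef struct Dep
	{
		double		degree;
		int			nattributes;
		short		attributes[1];	/* variable-length tail */
	} Dep;

	static Dep *
	dep_alloc(int k)
	{
		/* fixed part + k tail elements, zero-initialized */
		return calloc(1, offsetof(Dep, attributes) + k * sizeof(short));
	}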

@@ -431,7 +431,7 @@ statext_dependencies_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
dependencies->ndeps++;
dependencies = (MVDependencies *) repalloc(dependencies,
offsetof(MVDependencies, deps)
+ dependencies->ndeps * sizeof(MVDependency));
+dependencies->ndeps * sizeof(MVDependency));

dependencies->deps[dependencies->ndeps - 1] = d;
}

@@ -451,7 +451,7 @@ statext_dependencies_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
* Serialize list of dependencies into a bytea value.
*/
bytea *
statext_dependencies_serialize(MVDependencies * dependencies)
statext_dependencies_serialize(MVDependencies *dependencies)
{
int i;
bytea *output;

@@ -552,7 +552,7 @@ statext_dependencies_deserialize(bytea *data)

/* allocate space for the MCV items */
dependencies = repalloc(dependencies, offsetof(MVDependencies, deps)
+ (dependencies->ndeps * sizeof(MVDependency *)));
+(dependencies->ndeps * sizeof(MVDependency *)));

for (i = 0; i < dependencies->ndeps; i++)
{

@@ -573,7 +573,7 @@ statext_dependencies_deserialize(bytea *data)

/* now that we know the number of attributes, allocate the dependency */
d = (MVDependency *) palloc0(offsetof(MVDependency, attributes)
+ (k * sizeof(AttrNumber)));
+(k * sizeof(AttrNumber)));

d->degree = degree;
d->nattributes = k;

@@ -600,7 +600,7 @@ statext_dependencies_deserialize(bytea *data)
* attributes (assuming the clauses are suitable equality clauses)
*/
static bool
dependency_is_fully_matched(MVDependency * dependency, Bitmapset *attnums)
dependency_is_fully_matched(MVDependency *dependency, Bitmapset *attnums)
{
int j;

|
|||
* (see the comment in dependencies_clauselist_selectivity).
|
||||
*/
|
||||
static MVDependency *
|
||||
find_strongest_dependency(StatisticExtInfo * stats, MVDependencies * dependencies,
|
||||
find_strongest_dependency(StatisticExtInfo *stats, MVDependencies *dependencies,
|
||||
Bitmapset *attnums)
|
||||
{
|
||||
int i;
|
||||
|
|
|
|||
|
|

@@ -90,8 +90,8 @@ BuildRelationExtStatistics(Relation onerel, double totalrows,
ListCell *lc2;

/*
* Check if we can build these stats based on the column analyzed.
* If not, report this fact (except in autovacuum) and move on.
* Check if we can build these stats based on the column analyzed. If
* not, report this fact (except in autovacuum) and move on.
*/
stats = lookup_var_attr_stats(onerel, stat->columns,
natts, vacattrstats);

@@ -166,7 +166,7 @@ statext_ndistinct_serialize(MVNDistinct *ndistinct)
* for each item, including number of items for each.
*/
len = VARHDRSZ + SizeOfMVNDistinct +
ndistinct->nitems * (offsetof(MVNDistinctItem, attrs) + sizeof(int));
ndistinct->nitems * (offsetof(MVNDistinctItem, attrs) +sizeof(int));

/* and also include space for the actual attribute numbers */
for (i = 0; i < ndistinct->nitems; i++)

@@ -279,8 +279,8 @@ statext_ndistinct_deserialize(bytea *data)
VARSIZE_ANY_EXHDR(data), minimum_size)));

/*
* Allocate space for the ndistinct items (no space for each item's attnos:
* those live in bitmapsets allocated separately)
* Allocate space for the ndistinct items (no space for each item's
* attnos: those live in bitmapsets allocated separately)
*/
ndistinct = palloc0(MAXALIGN(SizeOfMVNDistinct) +
(ndist.nitems * sizeof(MVNDistinctItem)));

@@ -449,8 +449,8 @@ ndistinct_for_combination(double totalrows, int numrows, HeapTuple *rows,
}

/*
* For each dimension, set up sort-support and fill in the values from
* the sample data.
* For each dimension, set up sort-support and fill in the values from the
* sample data.
*/
for (i = 0; i < k; i++)
{

@@ -513,7 +513,7 @@ estimate_ndistinct(double totalrows, int numrows, int d, int f1)
denom,
ndistinct;

numer = (double) numrows * (double) d;
numer = (double) numrows *(double) d;

denom = (double) (numrows - f1) +
(double) f1 *(double) numrows / totalrows;
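
The two statements touched above are the numerator and denominator of the distinct-value estimator the surrounding function computes. A hedged reading of the visible code, with n = numrows, N = totalrows, d = distinct values in the sample and f1 = values seen exactly once:

	static double
	estimate_sketch(double N, int n, int d, int f1)
	{
		double		numer = (double) n * (double) d;
		double		denom = (double) (n - f1) + (double) f1 * (double) n / N;

		return numer / denom;	/* n*d / (n - f1 + f1*n/N) */
	}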

@@ -594,7 +594,7 @@ generator_init(int n, int k)

state->ncombinations = n_choose_k(n, k);

/* pre-allocate space for all combinations*/
/* pre-allocate space for all combinations */
state->combinations = (int *) palloc(sizeof(int) * k * state->ncombinations);

state->current = 0;

@@ -685,8 +685,8 @@ durable_unlink(const char *fname, int elevel)
}

/*
* To guarantee that the removal of the file is persistent, fsync
* its parent directory.
* To guarantee that the removal of the file is persistent, fsync its
* parent directory.
*/
if (fsync_parent_path(fname, elevel) != 0)
return -1;
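
The comment rewrapped above states a general POSIX durability rule: unlink() only becomes crash-safe once the directory that held the entry is itself fsync'ed. A minimal standalone sketch of the pattern (error handling trimmed; not the commit's code):

	#include <fcntl.h>
	#include <unistd.h>

	static int
	durable_unlink_sketch(const char *path, const char *parent_dir)
	{
		int			fd;

		if (unlink(path) != 0)
			return -1;

		/* persist the directory entry removal */
		fd = open(parent_dir, O_RDONLY);
		if (fd < 0)
			return -1;
		if (fsync(fd) != 0)
		{
			close(fd);
			return -1;
		}
		return close(fd);
	}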

@@ -218,8 +218,8 @@ ConditionVariableBroadcast(ConditionVariable *cv)
/*
* Let's just do this the dumbest way possible. We could try to dequeue
* all the sleepers at once to save spinlock cycles, but it's a bit hard
* to get that right in the face of possible sleep cancelations, and
* we don't want to loop holding the mutex.
* to get that right in the face of possible sleep cancelations, and we
* don't want to loop holding the mutex.
*/
while (ConditionVariableSignal(cv))
++nwoken;
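
The loop shown in this hunk builds a broadcast out of repeated signals. A hedged fragment of the same pattern, assuming ConditionVariableSignal() returns true for as long as it finds a sleeping process to wake:

	static int
	broadcast_sketch(ConditionVariable *cv)
	{
		int			nwoken = 0;

		/* wake sleepers one at a time until the wait queue is empty */
		while (ConditionVariableSignal(cv))
			++nwoken;

		return nwoken;
	}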

@@ -1452,6 +1452,7 @@ ProcessUtilitySlow(ParseState *pstate,
break;

case T_RefreshMatViewStmt:

/*
* REFRESH CONCURRENTLY executes some DDL commands internally.
* Inhibit DDL command collection here to avoid those commands

@@ -1610,6 +1611,7 @@ ProcessUtilitySlow(ParseState *pstate,

case T_AlterPublicationStmt:
AlterPublication((AlterPublicationStmt *) parsetree);

/*
* AlterPublication calls EventTriggerCollectSimpleCommand
* directly

@@ -284,8 +284,10 @@ jsonb_to_tsvector_byid(PG_FUNCTION_ARGS)

if (state.result == NULL)
{
/* There weren't any string elements in jsonb,
* so we need to return an empty vector */
/*
* There weren't any string elements in jsonb, so we need to return
* an empty vector
*/

if (prs->words != NULL)
pfree(prs->words);

@@ -328,8 +330,10 @@ json_to_tsvector_byid(PG_FUNCTION_ARGS)
PG_FREE_IF_COPY(json, 1);
if (state.result == NULL)
{
/* There weren't any string elements in json,
* so we need to return an empty vector */
/*
* There weren't any string elements in json, so we need to return an
* empty vector
*/

if (prs->words != NULL)
pfree(prs->words);

@@ -43,7 +43,7 @@ typedef struct HeadlineJsonState
bool transformed;
} HeadlineJsonState;

static text * headline_json_value(void *_state, char *elem_value, int elem_len);
static text *headline_json_value(void *_state, char *elem_value, int elem_len);

static void
tt_setup_firstcall(FuncCallContext *funcctx, Oid prsid)

@@ -279,8 +279,10 @@ cash_in(PG_FUNCTION_ARGS)
"money", str)));
}

/* If the value is supposed to be positive, flip the sign, but check for
* the most negative number. */
/*
* If the value is supposed to be positive, flip the sign, but check for
* the most negative number.
*/
if (sgn > 0)
{
result = -value;
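
The "most negative number" check described in the rewrapped comment guards a real two's-complement hazard: negating the minimum representable integer overflows. A standalone sketch (plain C, illustrative only):

	#include <stdbool.h>
	#include <stdint.h>

	static bool
	negate_checked(int64_t value, int64_t *result)
	{
		if (value == INT64_MIN)
			return false;		/* -INT64_MIN does not fit in int64_t */

		*result = -value;
		return true;
	}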

@@ -90,8 +90,8 @@ calculate_database_size(Oid dbOid)
AclResult aclresult;

/*
* User must have connect privilege for target database
* or be a member of pg_read_all_stats
* User must have connect privilege for target database or be a member of
* pg_read_all_stats
*/
aclresult = pg_database_aclcheck(dbOid, GetUserId(), ACL_CONNECT);
if (aclresult != ACLCHECK_OK &&

@@ -180,8 +180,8 @@ calculate_tablespace_size(Oid tblspcOid)

/*
* User must be a member of pg_read_all_stats or have CREATE privilege for
* target tablespace, either explicitly granted or implicitly because
* it is default for current database.
* target tablespace, either explicitly granted or implicitly because it
* is default for current database.
*/
if (tblspcOid != MyDatabaseTableSpace &&
!is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS))

@@ -1449,7 +1449,7 @@ str_numth(char *dest, char *num, int type)

#ifdef USE_ICU

typedef int32_t (*ICU_Convert_Func)(UChar *dest, int32_t destCapacity,
typedef int32_t (*ICU_Convert_Func) (UChar *dest, int32_t destCapacity,
const UChar *src, int32_t srcLength,
const char *locale,
UErrorCode *pErrorCode);

@@ -1592,7 +1592,10 @@ str_tolower(const char *buff, size_t nbytes, Oid collid)
workspace[curr_char] = towlower(workspace[curr_char]);
}

/* Make result large enough; case change might change number of bytes */
/*
* Make result large enough; case change might change number
* of bytes
*/
result_size = curr_char * pg_database_encoding_max_length() + 1;
result = palloc(result_size);

|
|||
result = pnstrdup(buff, nbytes);
|
||||
|
||||
/*
|
||||
* Note: we assume that tolower_l() will not be so broken as to need
|
||||
* an isupper_l() guard test. When using the default collation, we
|
||||
* apply the traditional Postgres behavior that forces ASCII-style
|
||||
* treatment of I/i, but in non-default collations you get exactly
|
||||
* what the collation says.
|
||||
* Note: we assume that tolower_l() will not be so broken as
|
||||
* to need an isupper_l() guard test. When using the default
|
||||
* collation, we apply the traditional Postgres behavior that
|
||||
* forces ASCII-style treatment of I/i, but in non-default
|
||||
* collations you get exactly what the collation says.
|
||||
*/
|
||||
for (p = result; *p; p++)
|
||||
{
|
||||
|
|

@@ -1672,7 +1675,8 @@ str_toupper(const char *buff, size_t nbytes, Oid collid)
#ifdef USE_ICU
if (mylocale && mylocale->provider == COLLPROVIDER_ICU)
{
int32_t len_uchar, len_conv;
int32_t len_uchar,
len_conv;
UChar *buff_uchar;
UChar *buff_conv;


@@ -1711,7 +1715,10 @@ str_toupper(const char *buff, size_t nbytes, Oid collid)
workspace[curr_char] = towupper(workspace[curr_char]);
}

/* Make result large enough; case change might change number of bytes */
/*
* Make result large enough; case change might change number
* of bytes
*/
result_size = curr_char * pg_database_encoding_max_length() + 1;
result = palloc(result_size);


@@ -1726,11 +1733,11 @@ str_toupper(const char *buff, size_t nbytes, Oid collid)
result = pnstrdup(buff, nbytes);

/*
* Note: we assume that toupper_l() will not be so broken as to need
* an islower_l() guard test. When using the default collation, we
* apply the traditional Postgres behavior that forces ASCII-style
* treatment of I/i, but in non-default collations you get exactly
* what the collation says.
* Note: we assume that toupper_l() will not be so broken as
* to need an islower_l() guard test. When using the default
* collation, we apply the traditional Postgres behavior that
* forces ASCII-style treatment of I/i, but in non-default
* collations you get exactly what the collation says.
*/
for (p = result; *p; p++)
{

@@ -1792,7 +1799,8 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
#ifdef USE_ICU
if (mylocale && mylocale->provider == COLLPROVIDER_ICU)
{
int32_t len_uchar, len_conv;
int32_t len_uchar,
len_conv;
UChar *buff_uchar;
UChar *buff_conv;


@@ -1843,7 +1851,10 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
}
}

/* Make result large enough; case change might change number of bytes */
/*
* Make result large enough; case change might change number
* of bytes
*/
result_size = curr_char * pg_database_encoding_max_length() + 1;
result = palloc(result_size);


@@ -1858,11 +1869,11 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
result = pnstrdup(buff, nbytes);

/*
* Note: we assume that toupper_l()/tolower_l() will not be so broken
* as to need guard tests. When using the default collation, we apply
* the traditional Postgres behavior that forces ASCII-style treatment
* of I/i, but in non-default collations you get exactly what the
* collation says.
* Note: we assume that toupper_l()/tolower_l() will not be so
* broken as to need guard tests. When using the default
* collation, we apply the traditional Postgres behavior that
* forces ASCII-style treatment of I/i, but in non-default
* collations you get exactly what the collation says.
*/
for (p = result; *p; p++)
{

@@ -57,8 +57,8 @@ typedef struct OkeysState
typedef struct IterateJsonStringValuesState
{
JsonLexContext *lex;
JsonIterateStringValuesAction action; /* an action that will be applied
to each json value */
JsonIterateStringValuesAction action; /* an action that will be
* applied to each json value */
void *action_state; /* any necessary context for iteration */
} IterateJsonStringValuesState;

|
|||
{
|
||||
JsonLexContext *lex;
|
||||
StringInfo strval; /* resulting json */
|
||||
JsonTransformStringValuesAction action; /* an action that will be applied
|
||||
to each json value */
|
||||
JsonTransformStringValuesAction action; /* an action that will be
|
||||
* applied to each json value */
|
||||
void *action_state; /* any necessary context for transformation */
|
||||
} TransformJsonStringValuesState;
|
||||
|
||||
|
|
@ -163,8 +163,8 @@ typedef struct ArrayIOData
|
|||
typedef struct CompositeIOData
|
||||
{
|
||||
/*
|
||||
* We use pointer to a RecordIOData here because variable-length
|
||||
* struct RecordIOData can't be used directly in ColumnIOData.io union
|
||||
* We use pointer to a RecordIOData here because variable-length struct
|
||||
* RecordIOData can't be used directly in ColumnIOData.io union
|
||||
*/
|
||||
RecordIOData *record_io; /* metadata cache for populate_record() */
|
||||
TupleDesc tupdesc; /* cached tuple descriptor */
|
||||
|
|
@ -203,7 +203,8 @@ struct ColumnIOData
|
|||
ArrayIOData array;
|
||||
CompositeIOData composite;
|
||||
DomainIOData domain;
|
||||
} io; /* metadata cache for various column type categories */
|
||||
} io; /* metadata cache for various column type
|
||||
* categories */
|
||||
};
|
||||
|
||||
/* structure to cache record metadata needed for populate_record() */
|
||||
|
|
@ -257,7 +258,8 @@ typedef struct PopulateArrayState
|
|||
JsonLexContext *lex; /* json lexer */
|
||||
PopulateArrayContext *ctx; /* context */
|
||||
char *element_start; /* start of the current array element */
|
||||
char *element_scalar; /* current array element token if it is a scalar */
|
||||
char *element_scalar; /* current array element token if it is a
|
||||
* scalar */
|
||||
JsonTokenType element_type; /* current array element type */
|
||||
} PopulateArrayState;
|
||||
|
||||
|
|
@ -425,7 +427,7 @@ static void prepare_column_cache(ColumnIOData *column, Oid typid, int32 typmod,
|
|||
static Datum populate_record_field(ColumnIOData *col, Oid typid, int32 typmod,
|
||||
const char *colname, MemoryContext mcxt, Datum defaultval,
|
||||
JsValue *jsv, bool *isnull);
|
||||
static RecordIOData * allocate_record_info(MemoryContext mcxt, int ncolumns);
|
||||
static RecordIOData *allocate_record_info(MemoryContext mcxt, int ncolumns);
|
||||
static bool JsObjectGetField(JsObject *obj, char *field, JsValue *jsv);
|
||||
static void populate_recordset_record(PopulateRecordsetState *state, JsObject *obj);
|
||||
static void populate_array_json(PopulateArrayContext *ctx, char *json, int len);
|
||||
|
|
@ -2567,9 +2569,9 @@ populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */
|
|||
tok = JsonbIteratorNext(&it, &val, true);
|
||||
|
||||
/*
|
||||
* If the number of dimensions is not yet known and
|
||||
* we have found end of the array, or the first child element is not
|
||||
* an array, then assign the number of dimensions now.
|
||||
* If the number of dimensions is not yet known and we have found end of
|
||||
* the array, or the first child element is not an array, then assign the
|
||||
* number of dimensions now.
|
||||
*/
|
||||
if (ctx->ndims <= 0 &&
|
||||
(tok == WJB_END_ARRAY ||
|
||||
|
|
@ -2585,8 +2587,8 @@ populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */
|
|||
while (tok == WJB_ELEM)
|
||||
{
|
||||
/*
|
||||
* Recurse only if the dimensions of dimensions is still unknown or
|
||||
* if it is not the innermost dimension.
|
||||
* Recurse only if the dimensions of dimensions is still unknown or if
|
||||
* it is not the innermost dimension.
|
||||
*/
|
||||
if (ctx->ndims > 0 && ndim >= ctx->ndims)
|
||||
populate_array_element(ctx, ndim, &jsv);
|
||||
|
|

@@ -2750,8 +2752,8 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
jsv->val.json.type == JSON_TOKEN_STRING)
{
/*
* Add quotes around string value (should be already escaped)
* if converting to json/jsonb.
* Add quotes around string value (should be already escaped) if
* converting to json/jsonb.
*/

if (len < 0)

@@ -2780,6 +2782,7 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
if (typid == JSONBOID)
{
Jsonb *jsonb = JsonbValueToJsonb(jbv); /* directly use jsonb */

return JsonbGetDatum(jsonb);
}
/* convert jsonb to string for typio call */

@@ -2790,6 +2793,7 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
* to json string, preserving quotes around top-level strings.
*/
Jsonb *jsonb = JsonbValueToJsonb(jbv);

str = JsonbToCString(NULL, &jsonb->root, VARSIZE(jsonb));
}
else if (jbv->type == jbvString) /* quotes are stripped */

@@ -3017,9 +3021,9 @@ populate_record(TupleDesc tupdesc,
int i;

/*
* if the input json is empty, we can only skip the rest if we were
* passed in a non-null record, since otherwise there may be issues
* with domain nulls.
* if the input json is empty, we can only skip the rest if we were passed
* in a non-null record, since otherwise there may be issues with domain
* nulls.
*/
if (defaultval && JsObjectIsEmpty(obj))
return defaultval;
@@ -3069,7 +3073,7 @@ populate_record(TupleDesc tupdesc,
 	{
 		Form_pg_attribute att = tupdesc->attrs[i];
 		char	   *colname = NameStr(att->attname);
-		JsValue		field = { 0 };
+		JsValue		field = {0};
 		bool		found;
 
 		/* Ignore dropped columns in datatype */
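Here the whole change is pgindent's initializer spacing: braced initializers lose their inner padding. The same rewrite appears again in populate_record_worker just below. Using the JsValue type from the hunk:

	JsValue		field = { 0 };	/* before */
	JsValue		field = {0};	/* after */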
@@ -3116,7 +3120,7 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
 {
 	int			json_arg_num = have_record_arg ? 1 : 0;
 	Oid			jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num);
-	JsValue		jsv = { 0 };
+	JsValue		jsv = {0};
 	HeapTupleHeader rec = NULL;
 	Oid			tupType;
 	int32		tupTypmod;
@@ -3210,7 +3214,8 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
 
 		jsv.val.json.str = VARDATA_ANY(json);
 		jsv.val.json.len = VARSIZE_ANY_EXHDR(json);
-		jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in populate_composite() */
+		jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in
+												 * populate_composite() */
 	}
 	else
 	{
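This hunk shows how pgindent handles a trailing comment that would overrun the right margin: the comment is split, and the continuation line is prefixed with `*` and aligned under the comment text. A sketch of the same rule on a shorter, invented line:

	x = 0;	/* a remark too long
			 * for one line */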
@@ -4837,6 +4842,7 @@ static void
 iterate_string_values_scalar(void *state, char *token, JsonTokenType tokentype)
 {
 	IterateJsonStringValuesState *_state = (IterateJsonStringValuesState *) state;
+
 	if (tokentype == JSON_TOKEN_STRING)
 		(*_state->action) (_state->action_state, token, strlen(token));
 }
@@ -4852,7 +4858,8 @@ transform_jsonb_string_values(Jsonb *jsonb, void *action_state,
 							  JsonTransformStringValuesAction transform_action)
 {
 	JsonbIterator *it;
-	JsonbValue	v, *res = NULL;
+	JsonbValue	v,
+			   *res = NULL;
 	JsonbIteratorToken type;
 	JsonbParseState *st = NULL;
 	text	   *out;
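The declaration change here is pgindent's one-declarator-per-line rule: comma-separated declarators in a single declaration are split onto separate lines, with each follow-on declarator aligned under the first name. Sketch with placeholder names:

	int			a, b = 0;	/* before */

	int			a,
				b = 0;		/* after: second declarator on its own line */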
@@ -4928,6 +4935,7 @@ static void
 transform_string_values_object_start(void *state)
 {
 	TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;
+
 	appendStringInfoCharMacro(_state->strval, '{');
 }
 

@@ -4935,6 +4943,7 @@ static void
 transform_string_values_object_end(void *state)
 {
 	TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;
+
 	appendStringInfoCharMacro(_state->strval, '}');
 }
 

@@ -4942,6 +4951,7 @@ static void
 transform_string_values_array_start(void *state)
 {
 	TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;
+
 	appendStringInfoCharMacro(_state->strval, '[');
 }
 

@@ -4949,6 +4959,7 @@ static void
 transform_string_values_array_end(void *state)
 {
 	TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;
+
 	appendStringInfoCharMacro(_state->strval, ']');
 }
 
@@ -4985,6 +4996,7 @@ transform_string_values_scalar(void *state, char *token, JsonTokenType tokentype
 	if (tokentype == JSON_TOKEN_STRING)
 	{
 		text	   *out = (*_state->action) (_state->action_state, token, strlen(token));
+
 		escape_json(_state->strval, text_to_cstring(out));
 	}
 	else
@@ -189,9 +189,9 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation)
 	/*
 	 * For efficiency reasons, in the single byte case we don't call lower()
 	 * on the pattern and text, but instead call SB_lower_char on each
-	 * character. In the multi-byte case we don't have much choice :-(.
-	 * Also, ICU does not support single-character case folding, so we go the
-	 * long way.
+	 * character. In the multi-byte case we don't have much choice :-(. Also,
+	 * ICU does not support single-character case folding, so we go the long
+	 * way.
 	 */
 
 	if (pg_database_encoding_max_length() > 1 || (locale && locale->provider == COLLPROVIDER_ICU))
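Beyond the refill, the comment documents the case-folding strategy in Generic_Text_IC_like: single-byte encodings fold one character at a time via SB_lower_char inside the matcher, while multi-byte encodings, and ICU collations (which lack single-character case folding), lower-case the whole pattern and text up front. A rough sketch of that dispatch; only Generic_Text_IC_like, SB_lower_char, pg_database_encoding_max_length, and COLLPROVIDER_ICU come from the hunk, the other helpers are hypothetical stand-ins:

	if (pg_database_encoding_max_length() > 1 ||
		(locale && locale->provider == COLLPROVIDER_ICU))
	{
		/* the long way: lower() both strings, then match */
		text	   *lstr = lower_copy(str);	/* hypothetical helper */
		text	   *lpat = lower_copy(pat);

		return MatchText(lstr, lpat);		/* hypothetical matcher */
	}
	else
		return MatchTextIC(str, pat);	/* folds each char via SB_lower_char */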
@@ -103,7 +103,7 @@ invalid_input:
 Datum
 macaddr8_in(PG_FUNCTION_ARGS)
 {
-	const unsigned char *str = (unsigned char*) PG_GETARG_CSTRING(0);
+	const unsigned char *str = (unsigned char *) PG_GETARG_CSTRING(0);
 	const unsigned char *ptr = str;
 	macaddr8   *result;
 	unsigned char a = 0,
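The single change in this last hunk is pgindent's cast spacing: a pointer cast gets a space before the `*`, matching how pointer declarators are spaced elsewhere in this run:

	p = (unsigned char*) q;		/* before */
	p = (unsigned char *) q;	/* after */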
Some files were not shown because too many files have changed in this diff.