Run pgindent on 9.2 source tree in preparation for first 9.3 commit-fest.

Bruce Momjian 2012-06-10 15:20:04 -04:00
parent 60801944fa
commit 927d61eeff
494 changed files with 7343 additions and 7046 deletions


@ -429,8 +429,8 @@ fileGetForeignRelSize(PlannerInfo *root,
FileFdwPlanState *fdw_private; FileFdwPlanState *fdw_private;
/* /*
* Fetch options. We only need filename at this point, but we might * Fetch options. We only need filename at this point, but we might as
* as well get everything and not need to re-fetch it later in planning. * well get everything and not need to re-fetch it later in planning.
*/ */
fdw_private = (FileFdwPlanState *) palloc(sizeof(FileFdwPlanState)); fdw_private = (FileFdwPlanState *) palloc(sizeof(FileFdwPlanState));
fileGetOptions(foreigntableid, fileGetOptions(foreigntableid,
@ -474,7 +474,8 @@ fileGetForeignPaths(PlannerInfo *root,
/* /*
* If data file was sorted, and we knew it somehow, we could insert * If data file was sorted, and we knew it somehow, we could insert
* appropriate pathkeys into the ForeignPath node to tell the planner that. * appropriate pathkeys into the ForeignPath node to tell the planner
* that.
*/ */
} }
@ -671,8 +672,8 @@ fileAnalyzeForeignTable(Relation relation,
fileGetOptions(RelationGetRelid(relation), &filename, &options); fileGetOptions(RelationGetRelid(relation), &filename, &options);
/* /*
* Get size of the file. (XXX if we fail here, would it be better to * Get size of the file. (XXX if we fail here, would it be better to just
* just return false to skip analyzing the table?) * return false to skip analyzing the table?)
*/ */
if (stat(filename, &stat_buf) < 0) if (stat(filename, &stat_buf) < 0)
ereport(ERROR, ereport(ERROR,
@ -853,8 +854,8 @@ file_acquire_sample_rows(Relation onerel, int elevel,
cstate = BeginCopyFrom(onerel, filename, NIL, options); cstate = BeginCopyFrom(onerel, filename, NIL, options);
/* /*
* Use per-tuple memory context to prevent leak of memory used to read rows * Use per-tuple memory context to prevent leak of memory used to read
* from the file with Copy routines. * rows from the file with Copy routines.
*/ */
tupcontext = AllocSetContextCreate(CurrentMemoryContext, tupcontext = AllocSetContextCreate(CurrentMemoryContext,
"file_fdw temporary context", "file_fdw temporary context",
@ -912,8 +913,8 @@ file_acquire_sample_rows(Relation onerel, int elevel,
if (rowstoskip <= 0) if (rowstoskip <= 0)
{ {
/* /*
* Found a suitable tuple, so save it, replacing one * Found a suitable tuple, so save it, replacing one old tuple
* old tuple at random * at random
*/ */
int k = (int) (targrows * anl_random_fract()); int k = (int) (targrows * anl_random_fract());


@ -140,8 +140,9 @@ CleanupPriorWALFiles(void)
strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0) strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0)
{ {
/* /*
* Use the original file name again now, including any extension * Use the original file name again now, including any
* that might have been chopped off before testing the sequence. * extension that might have been chopped off before testing
* the sequence.
*/ */
snprintf(WALFilePath, MAXPGPATH, "%s/%s", snprintf(WALFilePath, MAXPGPATH, "%s/%s",
archiveLocation, xlde->d_name); archiveLocation, xlde->d_name);
@ -298,7 +299,8 @@ main(int argc, char **argv)
dryrun = true; dryrun = true;
break; break;
case 'x': case 'x':
additional_ext = optarg; /* Extension to remove from xlogfile names */ additional_ext = optarg; /* Extension to remove from
* xlogfile names */
break; break;
default: default:
fprintf(stderr, "Try \"%s --help\" for more information.\n", progname); fprintf(stderr, "Try \"%s --help\" for more information.\n", progname);


@ -248,21 +248,21 @@ static uint32 pgss_hash_string(const char *str);
static void pgss_store(const char *query, uint32 queryId, static void pgss_store(const char *query, uint32 queryId,
double total_time, uint64 rows, double total_time, uint64 rows,
const BufferUsage *bufusage, const BufferUsage *bufusage,
pgssJumbleState * jstate); pgssJumbleState *jstate);
static Size pgss_memsize(void); static Size pgss_memsize(void);
static pgssEntry *entry_alloc(pgssHashKey *key, const char *query, static pgssEntry *entry_alloc(pgssHashKey *key, const char *query,
int query_len, bool sticky); int query_len, bool sticky);
static void entry_dealloc(void); static void entry_dealloc(void);
static void entry_reset(void); static void entry_reset(void);
static void AppendJumble(pgssJumbleState * jstate, static void AppendJumble(pgssJumbleState *jstate,
const unsigned char *item, Size size); const unsigned char *item, Size size);
static void JumbleQuery(pgssJumbleState * jstate, Query *query); static void JumbleQuery(pgssJumbleState *jstate, Query *query);
static void JumbleRangeTable(pgssJumbleState * jstate, List *rtable); static void JumbleRangeTable(pgssJumbleState *jstate, List *rtable);
static void JumbleExpr(pgssJumbleState * jstate, Node *node); static void JumbleExpr(pgssJumbleState *jstate, Node *node);
static void RecordConstLocation(pgssJumbleState * jstate, int location); static void RecordConstLocation(pgssJumbleState *jstate, int location);
static char *generate_normalized_query(pgssJumbleState * jstate, const char *query, static char *generate_normalized_query(pgssJumbleState *jstate, const char *query,
int *query_len_p, int encoding); int *query_len_p, int encoding);
static void fill_in_constant_lengths(pgssJumbleState * jstate, const char *query); static void fill_in_constant_lengths(pgssJumbleState *jstate, const char *query);
static int comp_location(const void *a, const void *b); static int comp_location(const void *a, const void *b);
@ -513,8 +513,8 @@ pgss_shmem_startup(void)
FreeFile(file); FreeFile(file);
/* /*
* Remove the file so it's not included in backups/replication * Remove the file so it's not included in backups/replication slaves,
* slaves, etc. A new file will be written on next shutdown. * etc. A new file will be written on next shutdown.
*/ */
unlink(PGSS_DUMP_FILE); unlink(PGSS_DUMP_FILE);
@ -626,8 +626,8 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query)
* the statement contains an optimizable statement for which a queryId * the statement contains an optimizable statement for which a queryId
* could be derived (such as EXPLAIN or DECLARE CURSOR). For such cases, * could be derived (such as EXPLAIN or DECLARE CURSOR). For such cases,
* runtime control will first go through ProcessUtility and then the * runtime control will first go through ProcessUtility and then the
* executor, and we don't want the executor hooks to do anything, since * executor, and we don't want the executor hooks to do anything, since we
* we are already measuring the statement's costs at the utility level. * are already measuring the statement's costs at the utility level.
*/ */
if (query->utilityStmt) if (query->utilityStmt)
{ {
@ -789,10 +789,9 @@ pgss_ProcessUtility(Node *parsetree, const char *queryString,
DestReceiver *dest, char *completionTag) DestReceiver *dest, char *completionTag)
{ {
/* /*
* If it's an EXECUTE statement, we don't track it and don't increment * If it's an EXECUTE statement, we don't track it and don't increment the
* the nesting level. This allows the cycles to be charged to the * nesting level. This allows the cycles to be charged to the underlying
* underlying PREPARE instead (by the Executor hooks), which is much more * PREPARE instead (by the Executor hooks), which is much more useful.
* useful.
* *
* We also don't track execution of PREPARE. If we did, we would get one * We also don't track execution of PREPARE. If we did, we would get one
* hash table entry for the PREPARE (with hash calculated from the query * hash table entry for the PREPARE (with hash calculated from the query
@ -942,7 +941,7 @@ static void
pgss_store(const char *query, uint32 queryId, pgss_store(const char *query, uint32 queryId,
double total_time, uint64 rows, double total_time, uint64 rows,
const BufferUsage *bufusage, const BufferUsage *bufusage,
pgssJumbleState * jstate) pgssJumbleState *jstate)
{ {
pgssHashKey key; pgssHashKey key;
pgssEntry *entry; pgssEntry *entry;
@ -1355,7 +1354,7 @@ entry_reset(void)
* the current jumble. * the current jumble.
*/ */
static void static void
AppendJumble(pgssJumbleState * jstate, const unsigned char *item, Size size) AppendJumble(pgssJumbleState *jstate, const unsigned char *item, Size size)
{ {
unsigned char *jumble = jstate->jumble; unsigned char *jumble = jstate->jumble;
Size jumble_len = jstate->jumble_len; Size jumble_len = jstate->jumble_len;
@ -1404,7 +1403,7 @@ AppendJumble(pgssJumbleState * jstate, const unsigned char *item, Size size)
* of information). * of information).
*/ */
static void static void
JumbleQuery(pgssJumbleState * jstate, Query *query) JumbleQuery(pgssJumbleState *jstate, Query *query)
{ {
Assert(IsA(query, Query)); Assert(IsA(query, Query));
Assert(query->utilityStmt == NULL); Assert(query->utilityStmt == NULL);
@ -1431,7 +1430,7 @@ JumbleQuery(pgssJumbleState * jstate, Query *query)
* Jumble a range table * Jumble a range table
*/ */
static void static void
JumbleRangeTable(pgssJumbleState * jstate, List *rtable) JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
{ {
ListCell *lc; ListCell *lc;
@ -1489,7 +1488,7 @@ JumbleRangeTable(pgssJumbleState * jstate, List *rtable)
* about any unrecognized node type. * about any unrecognized node type.
*/ */
static void static void
JumbleExpr(pgssJumbleState * jstate, Node *node) JumbleExpr(pgssJumbleState *jstate, Node *node)
{ {
ListCell *temp; ListCell *temp;
@ -1874,7 +1873,7 @@ JumbleExpr(pgssJumbleState * jstate, Node *node)
* that is currently being walked. * that is currently being walked.
*/ */
static void static void
RecordConstLocation(pgssJumbleState * jstate, int location) RecordConstLocation(pgssJumbleState *jstate, int location)
{ {
/* -1 indicates unknown or undefined location */ /* -1 indicates unknown or undefined location */
if (location >= 0) if (location >= 0)
@ -1909,7 +1908,7 @@ RecordConstLocation(pgssJumbleState * jstate, int location)
* Returns a palloc'd string, which is not necessarily null-terminated. * Returns a palloc'd string, which is not necessarily null-terminated.
*/ */
static char * static char *
generate_normalized_query(pgssJumbleState * jstate, const char *query, generate_normalized_query(pgssJumbleState *jstate, const char *query,
int *query_len_p, int encoding) int *query_len_p, int encoding)
{ {
char *norm_query; char *norm_query;
@ -2015,7 +2014,7 @@ generate_normalized_query(pgssJumbleState * jstate, const char *query,
* reason for a constant to start with a '-'. * reason for a constant to start with a '-'.
*/ */
static void static void
fill_in_constant_lengths(pgssJumbleState * jstate, const char *query) fill_in_constant_lengths(pgssJumbleState *jstate, const char *query)
{ {
pgssLocationLen *locs; pgssLocationLen *locs;
core_yyscan_t yyscanner; core_yyscan_t yyscanner;


@ -77,6 +77,7 @@ static void test_sync(int writes_per_op);
static void test_open_syncs(void); static void test_open_syncs(void);
static void test_open_sync(const char *msg, int writes_size); static void test_open_sync(const char *msg, int writes_size);
static void test_file_descriptor_sync(void); static void test_file_descriptor_sync(void);
#ifndef WIN32 #ifndef WIN32
static void process_alarm(int sig); static void process_alarm(int sig);
#else #else


@ -101,10 +101,16 @@ test_timing(int32 duration)
uint64 total_time; uint64 total_time;
int64 time_elapsed = 0; int64 time_elapsed = 0;
uint64 loop_count = 0; uint64 loop_count = 0;
uint64 prev, cur; uint64 prev,
int32 diff, i, bits, found; cur;
int32 diff,
i,
bits,
found;
instr_time start_time, end_time, temp; instr_time start_time,
end_time,
temp;
static int64 histogram[32]; static int64 histogram[32];
char buf[100]; char buf[100];


@ -199,9 +199,9 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
* trigram extraction is relatively CPU-expensive. We must include * trigram extraction is relatively CPU-expensive. We must include
* strategy number because trigram extraction depends on strategy. * strategy number because trigram extraction depends on strategy.
* *
* The cached structure contains the strategy number, then the input * The cached structure contains the strategy number, then the input query
* query (starting at a MAXALIGN boundary), then the TRGM value (also * (starting at a MAXALIGN boundary), then the TRGM value (also starting
* starting at a MAXALIGN boundary). * at a MAXALIGN boundary).
*/ */
if (cache == NULL || if (cache == NULL ||
strategy != *((StrategyNumber *) cache) || strategy != *((StrategyNumber *) cache) ||
@ -341,8 +341,7 @@ gtrgm_distance(PG_FUNCTION_ARGS)
char *cache = (char *) fcinfo->flinfo->fn_extra; char *cache = (char *) fcinfo->flinfo->fn_extra;
/* /*
* Cache the generated trigrams across multiple calls with the same * Cache the generated trigrams across multiple calls with the same query.
* query.
*/ */
if (cache == NULL || if (cache == NULL ||
VARSIZE(cache) != querysize || VARSIZE(cache) != querysize ||


@ -238,7 +238,8 @@ check_cluster_versions(void)
/* /*
* We can't allow downgrading because we use the target pg_dumpall, and * We can't allow downgrading because we use the target pg_dumpall, and
* pg_dumpall cannot operate on new database versions, only older versions. * pg_dumpall cannot operate on new database versions, only older
* versions.
*/ */
if (old_cluster.major_version > new_cluster.major_version) if (old_cluster.major_version > new_cluster.major_version)
pg_log(PG_FATAL, "This utility cannot be used to downgrade to older major PostgreSQL versions.\n"); pg_log(PG_FATAL, "This utility cannot be used to downgrade to older major PostgreSQL versions.\n");
@ -764,9 +765,9 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
PGconn *conn = connectToServer(cluster, active_db->db_name); PGconn *conn = connectToServer(cluster, active_db->db_name);
/* /*
* While several relkinds don't store any data, e.g. views, they * While several relkinds don't store any data, e.g. views, they can
* can be used to define data types of other columns, so we * be used to define data types of other columns, so we check all
* check all relkinds. * relkinds.
*/ */
res = executeQueryOrDie(conn, res = executeQueryOrDie(conn,
"SELECT n.nspname, c.relname, a.attname " "SELECT n.nspname, c.relname, a.attname "
@ -836,9 +837,11 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
static void static void
get_bin_version(ClusterInfo *cluster) get_bin_version(ClusterInfo *cluster)
{ {
char cmd[MAXPGPATH], cmd_output[MAX_STRING]; char cmd[MAXPGPATH],
cmd_output[MAX_STRING];
FILE *output; FILE *output;
int pre_dot, post_dot; int pre_dot,
post_dot;
snprintf(cmd, sizeof(cmd), "\"%s/pg_ctl\" --version", cluster->bindir); snprintf(cmd, sizeof(cmd), "\"%s/pg_ctl\" --version", cluster->bindir);
@ -858,4 +861,3 @@ get_bin_version(ClusterInfo *cluster)
cluster->bin_version = (pre_dot * 100 + post_dot) * 100; cluster->bin_version = (pre_dot * 100 + post_dot) * 100;
} }


@ -129,6 +129,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
pg_log(PG_VERBOSE, "%s", bufin); pg_log(PG_VERBOSE, "%s", bufin);
#ifdef WIN32 #ifdef WIN32
/* /*
* Due to an installer bug, LANG=C doesn't work for PG 8.3.3, but does * Due to an installer bug, LANG=C doesn't work for PG 8.3.3, but does
* work 8.2.6 and 8.3.7, so check for non-ASCII output and suggest a * work 8.2.6 and 8.3.7, so check for non-ASCII output and suggest a


@ -18,6 +18,7 @@
static void check_data_dir(const char *pg_data); static void check_data_dir(const char *pg_data);
static void check_bin_dir(ClusterInfo *cluster); static void check_bin_dir(ClusterInfo *cluster);
static void validate_exec(const char *dir, const char *cmdName); static void validate_exec(const char *dir, const char *cmdName);
#ifdef WIN32 #ifdef WIN32
static int win32_check_directory_write_permissions(void); static int win32_check_directory_write_permissions(void);
#endif #endif


@ -233,7 +233,7 @@ copy_file(const char *srcfile, const char *dstfile, bool force)
* large number of times. * large number of times.
*/ */
int int
load_directory(const char *dirname, struct dirent ***namelist) load_directory(const char *dirname, struct dirent *** namelist)
{ {
DIR *dirdesc; DIR *dirdesc;
struct dirent *direntry; struct dirent *direntry;
@ -314,7 +314,6 @@ win32_pghardlink(const char *src, const char *dst)
else else
return 0; return 0;
} }
#endif #endif
@ -330,5 +329,3 @@ fopen_priv(const char *path, const char *mode)
return fp; return fp;
} }


@ -144,8 +144,8 @@ get_loadable_libraries(void)
PGconn *conn = connectToServer(&old_cluster, active_db->db_name); PGconn *conn = connectToServer(&old_cluster, active_db->db_name);
/* /*
* Fetch all libraries referenced in this DB. We can't exclude * Fetch all libraries referenced in this DB. We can't exclude the
* the "pg_catalog" schema because, while such functions are not * "pg_catalog" schema because, while such functions are not
* explicitly dumped by pg_dump, they do reference implicit objects * explicitly dumped by pg_dump, they do reference implicit objects
* that pg_dump does dump, e.g. CREATE LANGUAGE plperl. * that pg_dump does dump, e.g. CREATE LANGUAGE plperl.
*/ */
@ -164,8 +164,8 @@ get_loadable_libraries(void)
* pg_dumpall to dump it. However that function still references * pg_dumpall to dump it. However that function still references
* "plpython" (no "2"), so it throws an error on restore. This code * "plpython" (no "2"), so it throws an error on restore. This code
* checks for the problem function, reports affected databases to the * checks for the problem function, reports affected databases to the
* user and explains how to remove them. * user and explains how to remove them. 8.1 git commit:
* 8.1 git commit: e0dedd0559f005d60c69c9772163e69c204bac69 * e0dedd0559f005d60c69c9772163e69c204bac69
* http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php * http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php
* http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php * http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php
*/ */
@ -294,16 +294,16 @@ check_loadable_libraries(void)
/* /*
* In Postgres 9.0, Python 3 support was added, and to do that, a * In Postgres 9.0, Python 3 support was added, and to do that, a
* plpython2u language was created with library name plpython2.so * plpython2u language was created with library name plpython2.so as a
* as a symbolic link to plpython.so. In Postgres 9.1, only the * symbolic link to plpython.so. In Postgres 9.1, only the
* plpython2.so library was created, and both plpythonu and * plpython2.so library was created, and both plpythonu and plpython2u
* plpython2u pointing to it. For this reason, any reference to * pointing to it. For this reason, any reference to library name
* library name "plpython" in an old PG <= 9.1 cluster must look * "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in
* for "plpython2" in the new cluster. * the new cluster.
* *
* For this case, we could check pg_pltemplate, but that only works * For this case, we could check pg_pltemplate, but that only works
* for languages, and does not help with function shared objects, * for languages, and does not help with function shared objects, so
* so we just do a general fix. * we just do a general fix.
*/ */
if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 && if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
strcmp(lib, "$libdir/plpython") == 0) strcmp(lib, "$libdir/plpython") == 0)


@ -57,12 +57,12 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
old_db->db_name, old_rel->reloid, new_rel->reloid); old_db->db_name, old_rel->reloid, new_rel->reloid);
/* /*
* TOAST table names initially match the heap pg_class oid. * TOAST table names initially match the heap pg_class oid. In
* In pre-8.4, TOAST table names change during CLUSTER; in pre-9.0, * pre-8.4, TOAST table names change during CLUSTER; in pre-9.0, TOAST
* TOAST table names change during ALTER TABLE ALTER COLUMN SET TYPE. * table names change during ALTER TABLE ALTER COLUMN SET TYPE. In >=
* In >= 9.0, TOAST relation names always use heap table oids, hence * 9.0, TOAST relation names always use heap table oids, hence we
* we cannot check relation names when upgrading from pre-9.0. * cannot check relation names when upgrading from pre-9.0. Clusters
* Clusters upgraded to 9.0 will get matching TOAST names. * upgraded to 9.0 will get matching TOAST names.
*/ */
if (strcmp(old_rel->nspname, new_rel->nspname) != 0 || if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 || ((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 ||


@ -139,9 +139,9 @@ parseCommandLine(int argc, char *argv[])
break; break;
/* /*
* Someday, the port number option could be removed and * Someday, the port number option could be removed and passed
* passed using -o/-O, but that requires postmaster -C * using -o/-O, but that requires postmaster -C to be
* to be supported on all old/new versions. * supported on all old/new versions.
*/ */
case 'p': case 'p':
if ((old_cluster.port = atoi(optarg)) <= 0) if ((old_cluster.port = atoi(optarg)) <= 0)
@ -322,8 +322,10 @@ void
adjust_data_dir(ClusterInfo *cluster) adjust_data_dir(ClusterInfo *cluster)
{ {
char filename[MAXPGPATH]; char filename[MAXPGPATH];
char cmd[MAXPGPATH], cmd_output[MAX_STRING]; char cmd[MAXPGPATH],
FILE *fp, *output; cmd_output[MAX_STRING];
FILE *fp,
*output;
/* If there is no postgresql.conf, it can't be a config-only dir */ /* If there is no postgresql.conf, it can't be a config-only dir */
snprintf(filename, sizeof(filename), "%s/postgresql.conf", cluster->pgconfig); snprintf(filename, sizeof(filename), "%s/postgresql.conf", cluster->pgconfig);
@ -345,10 +347,9 @@ adjust_data_dir(ClusterInfo *cluster)
CLUSTER_NAME(cluster)); CLUSTER_NAME(cluster));
/* /*
* We don't have a data directory yet, so we can't check the PG * We don't have a data directory yet, so we can't check the PG version,
* version, so this might fail --- only works for PG 9.2+. If this * so this might fail --- only works for PG 9.2+. If this fails,
* fails, pg_upgrade will fail anyway because the data files will not * pg_upgrade will fail anyway because the data files will not be found.
* be found.
*/ */
snprintf(cmd, sizeof(cmd), "\"%s/postmaster\" -D \"%s\" -C data_directory", snprintf(cmd, sizeof(cmd), "\"%s/postmaster\" -D \"%s\" -C data_directory",
cluster->bindir, cluster->pgconfig); cluster->bindir, cluster->pgconfig);


@ -122,11 +122,10 @@ main(int argc, char **argv)
stop_postmaster(false); stop_postmaster(false);
/* /*
* Most failures happen in create_new_objects(), which has * Most failures happen in create_new_objects(), which has completed at
* completed at this point. We do this here because it is just * this point. We do this here because it is just before linking, which
* before linking, which will link the old and new cluster data * will link the old and new cluster data files, preventing the old
* files, preventing the old cluster from being safely started * cluster from being safely started once the new cluster is started.
* once the new cluster is started.
*/ */
if (user_opts.transfer_mode == TRANSFER_MODE_LINK) if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
disable_old_cluster(); disable_old_cluster();


@ -222,9 +222,11 @@ typedef struct
ControlData controldata; /* pg_control information */ ControlData controldata; /* pg_control information */
DbInfoArr dbarr; /* dbinfos array */ DbInfoArr dbarr; /* dbinfos array */
char *pgdata; /* pathname for cluster's $PGDATA directory */ char *pgdata; /* pathname for cluster's $PGDATA directory */
char *pgconfig; /* pathname for cluster's config file directory */ char *pgconfig; /* pathname for cluster's config file
* directory */
char *bindir; /* pathname for cluster's executable directory */ char *bindir; /* pathname for cluster's executable directory */
char *pgopts; /* options to pass to the server, like pg_ctl -o */ char *pgopts; /* options to pass to the server, like pg_ctl
* -o */
unsigned short port; /* port number where postmaster is waiting */ unsigned short port; /* port number where postmaster is waiting */
uint32 major_version; /* PG_VERSION of cluster */ uint32 major_version; /* PG_VERSION of cluster */
char major_version_str[64]; /* string PG_VERSION of cluster */ char major_version_str[64]; /* string PG_VERSION of cluster */
@ -314,9 +316,10 @@ void split_old_dump(void);
/* exec.c */ /* exec.c */
int exec_prog(bool throw_error, bool is_priv, int
const char *log_file, const char *cmd, ...) exec_prog(bool throw_error, bool is_priv,
__attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5))); const char *log_file, const char *cmd,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5)));
void verify_directories(void); void verify_directories(void);
bool is_server_running(const char *datadir); bool is_server_running(const char *datadir);
@ -353,7 +356,7 @@ const char *setupPageConverter(pageCnvCtx **result);
typedef void *pageCnvCtx; typedef void *pageCnvCtx;
#endif #endif
int load_directory(const char *dirname, struct dirent ***namelist); int load_directory(const char *dirname, struct dirent *** namelist);
const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src, const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
const char *dst, bool force); const char *dst, bool force);
const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src, const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
@ -399,8 +402,9 @@ void init_tablespaces(void);
/* server.c */ /* server.c */
PGconn *connectToServer(ClusterInfo *cluster, const char *db_name); PGconn *connectToServer(ClusterInfo *cluster, const char *db_name);
PGresult *executeQueryOrDie(PGconn *conn, const char *fmt, ...) PGresult *
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); executeQueryOrDie(PGconn *conn, const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void start_postmaster(ClusterInfo *cluster); void start_postmaster(ClusterInfo *cluster);
void stop_postmaster(bool fast); void stop_postmaster(bool fast);
@ -413,12 +417,15 @@ void check_pghost_envvar(void);
char *quote_identifier(const char *s); char *quote_identifier(const char *s);
int get_user_info(char **user_name); int get_user_info(char **user_name);
void check_ok(void); void check_ok(void);
void report_status(eLogType type, const char *fmt, ...) void
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); report_status(eLogType type, const char *fmt,...)
void pg_log(eLogType type, char *fmt, ...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); void
void prep_status(const char *fmt, ...) pg_log(eLogType type, char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2))); __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void
prep_status(const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
void check_ok(void); void check_ok(void);
char *pg_strdup(const char *s); char *pg_strdup(const char *s);
void *pg_malloc(int size); void *pg_malloc(int size);


@ -34,7 +34,8 @@ const char *
transfer_all_new_dbs(DbInfoArr *old_db_arr, transfer_all_new_dbs(DbInfoArr *old_db_arr,
DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata) DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata)
{ {
int old_dbnum, new_dbnum; int old_dbnum,
new_dbnum;
const char *msg = NULL; const char *msg = NULL;
prep_status("%s user relation files\n", prep_status("%s user relation files\n",
@ -45,15 +46,16 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
old_dbnum < old_db_arr->ndbs; old_dbnum < old_db_arr->ndbs;
old_dbnum++, new_dbnum++) old_dbnum++, new_dbnum++)
{ {
DbInfo *old_db = &old_db_arr->dbs[old_dbnum], *new_db = NULL; DbInfo *old_db = &old_db_arr->dbs[old_dbnum],
*new_db = NULL;
FileNameMap *mappings; FileNameMap *mappings;
int n_maps; int n_maps;
pageCnvCtx *pageConverter = NULL; pageCnvCtx *pageConverter = NULL;
/* /*
* Advance past any databases that exist in the new cluster * Advance past any databases that exist in the new cluster but not in
* but not in the old, e.g. "postgres". (The user might * the old, e.g. "postgres". (The user might have removed the
* have removed the 'postgres' database from the old cluster.) * 'postgres' database from the old cluster.)
*/ */
for (; new_dbnum < new_db_arr->ndbs; new_dbnum++) for (; new_dbnum < new_db_arr->ndbs; new_dbnum++)
{ {
@ -83,8 +85,8 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
} }
} }
prep_status(" "); /* in case nothing printed; pass a space so gcc prep_status(" "); /* in case nothing printed; pass a space so
* doesn't complain about empty format * gcc doesn't complain about empty format
* string */ * string */
check_ok(); check_ok();


@ -66,7 +66,7 @@
typedef struct win32_pthread *pthread_t; typedef struct win32_pthread *pthread_t;
typedef int pthread_attr_t; typedef int pthread_attr_t;
static int pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg); static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
static int pthread_join(pthread_t th, void **thread_return); static int pthread_join(pthread_t th, void **thread_return);
#elif defined(ENABLE_THREAD_SAFETY) #elif defined(ENABLE_THREAD_SAFETY)
/* Use platform-dependent pthread capability */ /* Use platform-dependent pthread capability */
@ -84,7 +84,7 @@ static int pthread_join(pthread_t th, void **thread_return);
typedef struct fork_pthread *pthread_t; typedef struct fork_pthread *pthread_t;
typedef int pthread_attr_t; typedef int pthread_attr_t;
static int pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg); static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
static int pthread_join(pthread_t th, void **thread_return); static int pthread_join(pthread_t th, void **thread_return);
#endif #endif
@ -1267,7 +1267,8 @@ init(void)
* versions. Since pgbench has never pretended to be fully TPC-B * versions. Since pgbench has never pretended to be fully TPC-B
* compliant anyway, we stick with the historical behavior. * compliant anyway, we stick with the historical behavior.
*/ */
struct ddlinfo { struct ddlinfo
{
char *table; char *table;
char *cols; char *cols;
int declare_fillfactor; int declare_fillfactor;
@ -1321,14 +1322,15 @@ init(void)
/* Construct new create table statement. */ /* Construct new create table statement. */
opts[0] = '\0'; opts[0] = '\0';
if (ddl->declare_fillfactor) if (ddl->declare_fillfactor)
snprintf(opts+strlen(opts), 256-strlen(opts), snprintf(opts + strlen(opts), 256 - strlen(opts),
" with (fillfactor=%d)", fillfactor); " with (fillfactor=%d)", fillfactor);
if (tablespace != NULL) if (tablespace != NULL)
{ {
char *escape_tablespace; char *escape_tablespace;
escape_tablespace = PQescapeIdentifier(con, tablespace, escape_tablespace = PQescapeIdentifier(con, tablespace,
strlen(tablespace)); strlen(tablespace));
snprintf(opts+strlen(opts), 256-strlen(opts), snprintf(opts + strlen(opts), 256 - strlen(opts),
" tablespace %s", escape_tablespace); " tablespace %s", escape_tablespace);
PQfreemem(escape_tablespace); PQfreemem(escape_tablespace);
} }
@ -1411,9 +1413,10 @@ init(void)
if (index_tablespace != NULL) if (index_tablespace != NULL)
{ {
char *escape_tablespace; char *escape_tablespace;
escape_tablespace = PQescapeIdentifier(con, index_tablespace, escape_tablespace = PQescapeIdentifier(con, index_tablespace,
strlen(index_tablespace)); strlen(index_tablespace));
snprintf(buffer+strlen(buffer), 256-strlen(buffer), snprintf(buffer + strlen(buffer), 256 - strlen(buffer),
" using index tablespace %s", escape_tablespace); " using index tablespace %s", escape_tablespace);
PQfreemem(escape_tablespace); PQfreemem(escape_tablespace);
} }
@ -2571,7 +2574,7 @@ typedef struct fork_pthread
static int static int
pthread_create(pthread_t *thread, pthread_create(pthread_t *thread,
pthread_attr_t * attr, pthread_attr_t *attr,
void *(*start_routine) (void *), void *(*start_routine) (void *),
void *arg) void *arg)
{ {
@ -2687,7 +2690,7 @@ win32_pthread_run(void *arg)
static int static int
pthread_create(pthread_t *thread, pthread_create(pthread_t *thread,
pthread_attr_t * attr, pthread_attr_t *attr,
void *(*start_routine) (void *), void *(*start_routine) (void *),
void *arg) void *arg)
{ {


@ -34,8 +34,8 @@ char *
px_crypt_md5(const char *pw, const char *salt, char *passwd, unsigned dstlen) px_crypt_md5(const char *pw, const char *salt, char *passwd, unsigned dstlen)
{ {
static char *magic = "$1$"; /* This string is magic for this algorithm. static char *magic = "$1$"; /* This string is magic for this algorithm.
* Having it this way, we can get better * Having it this way, we can get better later
* later on */ * on */
static char *p; static char *p;
static const char *sp, static const char *sp,
*ep; *ep;


@ -204,8 +204,9 @@ const char *px_resolve_alias(const PX_Alias *aliases, const char *name);
void px_set_debug_handler(void (*handler) (const char *)); void px_set_debug_handler(void (*handler) (const char *));
#ifdef PX_DEBUG #ifdef PX_DEBUG
void px_debug(const char *fmt, ...) void
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2))); px_debug(const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
#else #else
#define px_debug(...) #define px_debug(...)
#endif #endif


@ -42,9 +42,9 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
Form_pg_database datForm; Form_pg_database datForm;
/* /*
* Oid of the source database is not saved in pg_database catalog, * Oid of the source database is not saved in pg_database catalog, so we
* so we collect its identifier using contextual information. * collect its identifier using contextual information. If NULL, its
* If NULL, its default is "template1" according to createdb(). * default is "template1" according to createdb().
*/ */
if (!dtemplate) if (!dtemplate)
dtemplate = "template1"; dtemplate = "template1";
@ -56,6 +56,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
tcontext = sepgsql_get_label(object.classId, tcontext = sepgsql_get_label(object.classId,
object.objectId, object.objectId,
object.objectSubId); object.objectSubId);
/* /*
* check db_database:{getattr} permission * check db_database:{getattr} permission
*/ */
@ -67,11 +68,11 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
true); true);
/* /*
* Compute a default security label of the newly created database * Compute a default security label of the newly created database based on
* based on a pair of security label of client and source database. * a pair of security label of client and source database.
* *
* XXX - uncoming version of libselinux supports to take object * XXX - uncoming version of libselinux supports to take object name to
* name to handle special treatment on default security label. * handle special treatment on default security label.
*/ */
rel = heap_open(DatabaseRelationId, AccessShareLock); rel = heap_open(DatabaseRelationId, AccessShareLock);
@ -91,6 +92,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
ncontext = sepgsql_compute_create(sepgsql_get_client_label(), ncontext = sepgsql_compute_create(sepgsql_get_client_label(),
tcontext, tcontext,
SEPG_CLASS_DB_DATABASE); SEPG_CLASS_DB_DATABASE);
/* /*
* check db_database:{create} permission * check db_database:{create} permission
*/ */
@ -170,6 +172,7 @@ sepgsql_database_relabel(Oid databaseId, const char *seclabel)
SEPG_DB_DATABASE__RELABELFROM, SEPG_DB_DATABASE__RELABELFROM,
audit_name, audit_name,
true); true);
/* /*
* check db_database:{relabelto} permission * check db_database:{relabelto} permission
*/ */


@ -115,9 +115,8 @@ sepgsql_object_access(ObjectAccessType access,
* All cases we want to apply permission checks on * All cases we want to apply permission checks on
* creation of a new relation are invocation of the * creation of a new relation are invocation of the
* heap_create_with_catalog via DefineRelation or * heap_create_with_catalog via DefineRelation or
* OpenIntoRel. * OpenIntoRel. Elsewhere, we need neither assignment
* Elsewhere, we need neither assignment of security * of security label nor permission checks.
* label nor permission checks.
*/ */
switch (sepgsql_context_info.cmdtype) switch (sepgsql_context_info.cmdtype)
{ {
@ -150,12 +149,12 @@ sepgsql_object_access(ObjectAccessType access,
case OAT_DROP: case OAT_DROP:
{ {
ObjectAccessDrop *drop_arg = (ObjectAccessDrop *)arg; ObjectAccessDrop *drop_arg = (ObjectAccessDrop *) arg;
/* /*
* No need to apply permission checks on object deletion * No need to apply permission checks on object deletion due
* due to internal cleanups; such as removal of temporary * to internal cleanups; such as removal of temporary database
* database object on session closed. * object on session closed.
*/ */
if ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL) != 0) if ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL) != 0)
break; break;
@ -277,19 +276,20 @@ sepgsql_utility_command(Node *parsetree,
{ {
/* /*
* Check command tag to avoid nefarious operations, and save the * Check command tag to avoid nefarious operations, and save the
* current contextual information to determine whether we should * current contextual information to determine whether we should apply
* apply permission checks here, or not. * permission checks here, or not.
*/ */
sepgsql_context_info.cmdtype = nodeTag(parsetree); sepgsql_context_info.cmdtype = nodeTag(parsetree);
switch (nodeTag(parsetree)) switch (nodeTag(parsetree))
{ {
case T_CreatedbStmt: case T_CreatedbStmt:
/* /*
* We hope to reference name of the source database, but it * We hope to reference name of the source database, but it
* does not appear in system catalog. So, we save it here. * does not appear in system catalog. So, we save it here.
*/ */
foreach (cell, ((CreatedbStmt *) parsetree)->options) foreach(cell, ((CreatedbStmt *) parsetree)->options)
{ {
DefElem *defel = (DefElem *) lfirst(cell); DefElem *defel = (DefElem *) lfirst(cell);
@ -303,6 +303,7 @@ sepgsql_utility_command(Node *parsetree,
break; break;
case T_LoadStmt: case T_LoadStmt:
/* /*
* We reject LOAD command across the board on enforcing mode, * We reject LOAD command across the board on enforcing mode,
* because a binary module can arbitrarily override hooks. * because a binary module can arbitrarily override hooks.
@ -315,6 +316,7 @@ sepgsql_utility_command(Node *parsetree,
} }
break; break;
default: default:
/* /*
* Right now we don't check any other utility commands, * Right now we don't check any other utility commands,
* because it needs more detailed information to make access * because it needs more detailed information to make access


@ -65,7 +65,8 @@ static char *client_label_committed = NULL; /* set by sepgsql_setcon(),
* and already committed */ * and already committed */
static char *client_label_func = NULL; /* set by trusted procedure */ static char *client_label_func = NULL; /* set by trusted procedure */
typedef struct { typedef struct
{
SubTransactionId subid; SubTransactionId subid;
char *label; char *label;
} pending_label; } pending_label;
@ -140,9 +141,10 @@ sepgsql_set_client_label(const char *new_label)
SEPG_PROCESS__DYNTRANSITION, SEPG_PROCESS__DYNTRANSITION,
NULL, NULL,
true); true);
/* /*
* Append the supplied new_label on the pending list until * Append the supplied new_label on the pending list until the current
* the current transaction is committed. * transaction is committed.
*/ */
oldcxt = MemoryContextSwitchTo(CurTransactionContext); oldcxt = MemoryContextSwitchTo(CurTransactionContext);
@ -181,10 +183,11 @@ sepgsql_xact_callback(XactEvent event, void *arg)
pfree(client_label_committed); pfree(client_label_committed);
client_label_committed = new_label; client_label_committed = new_label;
/* /*
* XXX - Note that items of client_label_pending are allocated * XXX - Note that items of client_label_pending are allocated on
* on CurTransactionContext, thus, all acquired memory region * CurTransactionContext, thus, all acquired memory region shall
* shall be released implicitly. * be released implicitly.
*/ */
client_label_pending = NIL; client_label_pending = NIL;
} }
@ -213,6 +216,7 @@ sepgsql_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
for (cell = list_head(client_label_pending); cell; cell = next) for (cell = list_head(client_label_pending); cell; cell = next)
{ {
pending_label *plabel = lfirst(cell); pending_label *plabel = lfirst(cell);
next = lnext(cell); next = lnext(cell);
if (plabel->subid == mySubid) if (plabel->subid == mySubid)
@ -340,8 +344,8 @@ sepgsql_fmgr_hook(FmgrHookEventType event,
/* /*
* process:transition permission between old and new label, * process:transition permission between old and new label,
* when user tries to switch security label of the client * when user tries to switch security label of the client on
* on execution of trusted procedure. * execution of trusted procedure.
*/ */
if (stack->new_label) if (stack->new_label)
sepgsql_avc_check_perms_label(stack->new_label, sepgsql_avc_check_perms_label(stack->new_label,


@ -77,6 +77,7 @@ sepgsql_proc_post_create(Oid functionId)
SEPG_DB_SCHEMA__ADD_NAME, SEPG_DB_SCHEMA__ADD_NAME,
getObjectDescription(&object), getObjectDescription(&object),
true); true);
/* /*
* XXX - db_language:{implement} also should be checked here * XXX - db_language:{implement} also should be checked here
*/ */
@ -97,9 +98,10 @@ sepgsql_proc_post_create(Oid functionId)
*/ */
initStringInfo(&audit_name); initStringInfo(&audit_name);
appendStringInfo(&audit_name, "function %s(", NameStr(proForm->proname)); appendStringInfo(&audit_name, "function %s(", NameStr(proForm->proname));
for (i=0; i < proForm->pronargs; i++) for (i = 0; i < proForm->pronargs; i++)
{ {
Oid typeoid = proForm->proargtypes.values[i]; Oid typeoid = proForm->proargtypes.values[i];
if (i > 0) if (i > 0)
appendStringInfoChar(&audit_name, ','); appendStringInfoChar(&audit_name, ',');
appendStringInfoString(&audit_name, format_type_be(typeoid)); appendStringInfoString(&audit_name, format_type_be(typeoid));
@ -111,6 +113,7 @@ sepgsql_proc_post_create(Oid functionId)
SEPG_DB_PROCEDURE__CREATE, SEPG_DB_PROCEDURE__CREATE,
audit_name.data, audit_name.data,
true); true);
/* /*
* Assign the default security label on a new procedure * Assign the default security label on a new procedure
*/ */
@ -198,6 +201,7 @@ sepgsql_proc_relabel(Oid functionId, const char *seclabel)
SEPG_DB_PROCEDURE__RELABELFROM, SEPG_DB_PROCEDURE__RELABELFROM,
audit_name, audit_name,
true); true);
/* /*
* check db_procedure:{relabelto} permission * check db_procedure:{relabelto} permission
*/ */


@ -44,7 +44,7 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
char *scontext; char *scontext;
char *tcontext; char *tcontext;
char *ncontext; char *ncontext;
char audit_name[2*NAMEDATALEN + 20]; char audit_name[2 * NAMEDATALEN + 20];
ObjectAddress object; ObjectAddress object;
Form_pg_attribute attForm; Form_pg_attribute attForm;
@ -84,6 +84,7 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
tcontext = sepgsql_get_label(RelationRelationId, relOid, 0); tcontext = sepgsql_get_label(RelationRelationId, relOid, 0);
ncontext = sepgsql_compute_create(scontext, tcontext, ncontext = sepgsql_compute_create(scontext, tcontext,
SEPG_CLASS_DB_COLUMN); SEPG_CLASS_DB_COLUMN);
/* /*
* check db_column:{create} permission * check db_column:{create} permission
*/ */
@ -172,6 +173,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
SEPG_DB_COLUMN__RELABELFROM, SEPG_DB_COLUMN__RELABELFROM,
audit_name, audit_name,
true); true);
/* /*
* check db_column:{relabelto} permission * check db_column:{relabelto} permission
*/ */
@ -203,7 +205,7 @@ sepgsql_relation_post_create(Oid relOid)
char *tcontext; /* schema */ char *tcontext; /* schema */
char *rcontext; /* relation */ char *rcontext; /* relation */
char *ccontext; /* column */ char *ccontext; /* column */
char audit_name[2*NAMEDATALEN + 20]; char audit_name[2 * NAMEDATALEN + 20];
/* /*
* Fetch catalog record of the new relation. Because pg_class entry is not * Fetch catalog record of the new relation. Because pg_class entry is not
@ -254,6 +256,7 @@ sepgsql_relation_post_create(Oid relOid)
SEPG_DB_SCHEMA__ADD_NAME, SEPG_DB_SCHEMA__ADD_NAME,
getObjectDescription(&object), getObjectDescription(&object),
true); true);
/* /*
* Compute a default security label when we create a new relation object * Compute a default security label when we create a new relation object
* under the specified namespace. * under the specified namespace.
@ -273,6 +276,7 @@ sepgsql_relation_post_create(Oid relOid)
SEPG_DB_DATABASE__CREATE, SEPG_DB_DATABASE__CREATE,
audit_name, audit_name,
true); true);
/* /*
* Assign the default security label on the new relation * Assign the default security label on the new relation
*/ */
@ -315,6 +319,7 @@ sepgsql_relation_post_create(Oid relOid)
ccontext = sepgsql_compute_create(scontext, ccontext = sepgsql_compute_create(scontext,
rcontext, rcontext,
SEPG_CLASS_DB_COLUMN); SEPG_CLASS_DB_COLUMN);
/* /*
* check db_column:{create} permission * check db_column:{create} permission
*/ */
@ -404,7 +409,7 @@ sepgsql_relation_drop(Oid relOid)
int i; int i;
attrList = SearchSysCacheList1(ATTNUM, ObjectIdGetDatum(relOid)); attrList = SearchSysCacheList1(ATTNUM, ObjectIdGetDatum(relOid));
for (i=0; i < attrList->n_members; i++) for (i = 0; i < attrList->n_members; i++)
{ {
atttup = &attrList->members[i]->tuple; atttup = &attrList->members[i]->tuple;
attForm = (Form_pg_attribute) GETSTRUCT(atttup); attForm = (Form_pg_attribute) GETSTRUCT(atttup);
@ -468,6 +473,7 @@ sepgsql_relation_relabel(Oid relOid, const char *seclabel)
SEPG_DB_TABLE__RELABELFROM, SEPG_DB_TABLE__RELABELFROM,
audit_name, audit_name,
true); true);
/* /*
* check db_xxx:{relabelto} permission * check db_xxx:{relabelto} permission
*/ */


@ -48,9 +48,9 @@ sepgsql_schema_post_create(Oid namespaceId)
* Compute a default security label when we create a new schema object * Compute a default security label when we create a new schema object
* under the working database. * under the working database.
* *
* XXX - uncoming version of libselinux supports to take object * XXX - uncoming version of libselinux supports to take object name to
* name to handle special treatment on default security label; * handle special treatment on default security label; such as special
* such as special label on "pg_temp" schema. * label on "pg_temp" schema.
*/ */
rel = heap_open(NamespaceRelationId, AccessShareLock); rel = heap_open(NamespaceRelationId, AccessShareLock);
@ -71,6 +71,7 @@ sepgsql_schema_post_create(Oid namespaceId)
ncontext = sepgsql_compute_create(sepgsql_get_client_label(), ncontext = sepgsql_compute_create(sepgsql_get_client_label(),
tcontext, tcontext,
SEPG_CLASS_DB_SCHEMA); SEPG_CLASS_DB_SCHEMA);
/* /*
* check db_schema:{create} * check db_schema:{create}
*/ */
@ -149,6 +150,7 @@ sepgsql_schema_relabel(Oid namespaceId, const char *seclabel)
SEPG_DB_SCHEMA__RELABELFROM, SEPG_DB_SCHEMA__RELABELFROM,
audit_name, audit_name,
true); true);
/* /*
* check db_schema:{relabelto} permission * check db_schema:{relabelto} permission
*/ */


@ -248,6 +248,7 @@ extern bool sepgsql_check_perms(const char *scontext,
uint32 required, uint32 required,
const char *audit_name, const char *audit_name,
bool abort); bool abort);
/* /*
* uavc.c * uavc.c
*/ */


@ -67,8 +67,8 @@ static char *avc_unlabeled; /* system 'unlabeled' label */
static uint32 static uint32
sepgsql_avc_hash(const char *scontext, const char *tcontext, uint16 tclass) sepgsql_avc_hash(const char *scontext, const char *tcontext, uint16 tclass)
{ {
return hash_any((const unsigned char *)scontext, strlen(scontext)) return hash_any((const unsigned char *) scontext, strlen(scontext))
^ hash_any((const unsigned char *)tcontext, strlen(tcontext)) ^ hash_any((const unsigned char *) tcontext, strlen(tcontext))
^ tclass; ^ tclass;
} }
@ -220,12 +220,12 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
index = hash % AVC_NUM_SLOTS; index = hash % AVC_NUM_SLOTS;
/* /*
* Validation check of the supplied security context. * Validation check of the supplied security context. Because it always
* Because it always invoke system-call, frequent check should be avoided. * invoke system-call, frequent check should be avoided. Unless security
* Unless security policy is reloaded, validation status shall be kept, so * policy is reloaded, validation status shall be kept, so we also cache
* we also cache whether the supplied security context was valid, or not. * whether the supplied security context was valid, or not.
*/ */
if (security_check_context_raw((security_context_t)tcontext) != 0) if (security_check_context_raw((security_context_t) tcontext) != 0)
ucontext = sepgsql_avc_unlabeled(); ucontext = sepgsql_avc_unlabeled();
/* /*
@ -237,15 +237,14 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
sepgsql_compute_avd(scontext, ucontext, tclass, &avd); sepgsql_compute_avd(scontext, ucontext, tclass, &avd);
/* /*
* It also caches a security label to be switched when a client * It also caches a security label to be switched when a client labeled as
* labeled as 'scontext' executes a procedure labeled as 'tcontext', * 'scontext' executes a procedure labeled as 'tcontext', not only access
* not only access control decision on the procedure. * control decision on the procedure. The security label to be switched
* The security label to be switched shall be computed uniquely on * shall be computed uniquely on a pair of 'scontext' and 'tcontext',
* a pair of 'scontext' and 'tcontext', thus, it is reasonable to * thus, it is reasonable to cache the new label on avc, and enables to
* cache the new label on avc, and enables to reduce unnecessary * reduce unnecessary system calls. It shall be referenced at
* system calls. * sepgsql_needs_fmgr_hook to check whether the supplied function is a
* It shall be referenced at sepgsql_needs_fmgr_hook to check whether * trusted procedure, or not.
* the supplied function is a trusted procedure, or not.
*/ */
if (tclass == SEPG_CLASS_DB_PROCEDURE) if (tclass == SEPG_CLASS_DB_PROCEDURE)
{ {
@ -314,7 +313,7 @@ sepgsql_avc_lookup(const char *scontext, const char *tcontext, uint16 tclass)
hash = sepgsql_avc_hash(scontext, tcontext, tclass); hash = sepgsql_avc_hash(scontext, tcontext, tclass);
index = hash % AVC_NUM_SLOTS; index = hash % AVC_NUM_SLOTS;
foreach (cell, avc_slots[index]) foreach(cell, avc_slots[index])
{ {
cache = lfirst(cell); cache = lfirst(cell);
@ -355,7 +354,8 @@ sepgsql_avc_check_perms_label(const char *tcontext,
bool result; bool result;
sepgsql_avc_check_valid(); sepgsql_avc_check_valid();
do { do
{
result = true; result = true;
/* /*
@ -384,9 +384,9 @@ sepgsql_avc_check_perms_label(const char *tcontext,
/* /*
* In permissive mode or permissive domain, violated permissions * In permissive mode or permissive domain, violated permissions
* shall be audited to the log files at once, and then implicitly * shall be audited to the log files at once, and then implicitly
* allowed to avoid a flood of access denied logs, because * allowed to avoid a flood of access denied logs, because the
* the purpose of permissive mode/domain is to collect a violation * purpose of permissive mode/domain is to collect a violation log
* log that will make it possible to fix up the security policy. * that will make it possible to fix up the security policy.
*/ */
if (!sepgsql_getenforce() || cache->permissive) if (!sepgsql_getenforce() || cache->permissive)
cache->allowed |= required; cache->allowed |= required;
@ -397,10 +397,10 @@ sepgsql_avc_check_perms_label(const char *tcontext,
/* /*
* In the case when we have something auditable actions here, * In the case when we have something auditable actions here,
* sepgsql_audit_log shall be called with text representation of * sepgsql_audit_log shall be called with text representation of security
* security labels for both of subject and object. * labels for both of subject and object. It records this access
* It records this access violation, so DBA will be able to find * violation, so DBA will be able to find out unexpected security problems
* out unexpected security problems later. * later.
*/ */
if (audited != 0 && if (audited != 0 &&
audit_name != SEPGSQL_AVC_NOAUDIT && audit_name != SEPGSQL_AVC_NOAUDIT &&
@ -461,7 +461,8 @@ sepgsql_avc_trusted_proc(Oid functionId)
tcontext = GetSecurityLabel(&tobject, SEPGSQL_LABEL_TAG); tcontext = GetSecurityLabel(&tobject, SEPGSQL_LABEL_TAG);
sepgsql_avc_check_valid(); sepgsql_avc_check_valid();
do { do
{
if (tcontext) if (tcontext)
cache = sepgsql_avc_lookup(scontext, tcontext, cache = sepgsql_avc_lookup(scontext, tcontext,
SEPG_CLASS_DB_PROCEDURE); SEPG_CLASS_DB_PROCEDURE);
@ -508,12 +509,11 @@ sepgsql_avc_init(void)
avc_threshold = AVC_DEF_THRESHOLD; avc_threshold = AVC_DEF_THRESHOLD;
/* /*
* SELinux allows to mmap(2) its kernel status page in read-only mode * SELinux allows to mmap(2) its kernel status page in read-only mode to
* to inform userspace applications its status updating (such as * inform userspace applications its status updating (such as policy
* policy reloading) without system-call invocations. * reloading) without system-call invocations. This feature is only
* This feature is only supported in Linux-2.6.38 or later, however, * supported in Linux-2.6.38 or later, however, libselinux provides a
* libselinux provides a fallback mode to know its status using * fallback mode to know its status using netlink sockets.
* netlink sockets.
*/ */
rc = selinux_status_open(1); rc = selinux_status_open(1);
if (rc < 0) if (rc < 0)


@ -536,8 +536,7 @@ check_foreign_key(PG_FUNCTION_ARGS)
/* /*
* Remember that SPI_prepare places plan in current memory context * Remember that SPI_prepare places plan in current memory context
* - so, we have to save plan in Top memory context for later * - so, we have to save plan in Top memory context for later use.
* use.
*/ */
if (SPI_keepplan(pplan)) if (SPI_keepplan(pplan))
/* internal error */ /* internal error */


@ -261,8 +261,8 @@ vacuumlo(const char *database, const struct _param * param)
* We don't want to run each delete as an individual transaction, because * We don't want to run each delete as an individual transaction, because
* the commit overhead would be high. However, since 9.0 the backend will * the commit overhead would be high. However, since 9.0 the backend will
* acquire a lock per deleted LO, so deleting too many LOs per transaction * acquire a lock per deleted LO, so deleting too many LOs per transaction
* risks running out of room in the shared-memory lock table. * risks running out of room in the shared-memory lock table. Accordingly,
* Accordingly, we delete up to transaction_limit LOs per transaction. * we delete up to transaction_limit LOs per transaction.
*/ */
res = PQexec(conn, "begin"); res = PQexec(conn, "begin");
if (PQresultStatus(res) != PGRES_COMMAND_OK) if (PQresultStatus(res) != PGRES_COMMAND_OK)


@ -718,9 +718,9 @@ xpath_table(PG_FUNCTION_ARGS)
xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2); xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2);
/* /*
* Clear the values array, so that not-well-formed documents return * Clear the values array, so that not-well-formed documents
* NULL in all columns. Note that this also means that spare columns * return NULL in all columns. Note that this also means that
* will be NULL. * spare columns will be NULL.
*/ */
for (j = 0; j < ret_tupdesc->natts; j++) for (j = 0; j < ret_tupdesc->natts; j++)
values[j] = NULL; values[j] = NULL;


@ -781,8 +781,8 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
{ {
/* /*
* Page was split while we looked elsewhere. We didn't see the * Page was split while we looked elsewhere. We didn't see the
* downlink to the right page when we scanned the parent, so * downlink to the right page when we scanned the parent, so add
* add it to the queue now. * it to the queue now.
* *
* Put the right page ahead of the queue, so that we visit it * Put the right page ahead of the queue, so that we visit it
* next. That's important, because if this is the lowest internal * next. That's important, because if this is the lowest internal


@ -716,8 +716,8 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
/* /*
* All the downlinks on the old root page are now on one of the child * All the downlinks on the old root page are now on one of the child
* pages. Visit all the new child pages to memorize the parents of * pages. Visit all the new child pages to memorize the parents of the
* the grandchildren. * grandchildren.
*/ */
if (gfbb->rootlevel > 1) if (gfbb->rootlevel > 1)
{ {
@ -734,8 +734,8 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
UnlockReleaseBuffer(childbuf); UnlockReleaseBuffer(childbuf);
/* /*
* Also remember that the parent of the new child page is * Also remember that the parent of the new child page is the
* the root block. * root block.
*/ */
gistMemorizeParent(buildstate, childblkno, GIST_ROOT_BLKNO); gistMemorizeParent(buildstate, childblkno, GIST_ROOT_BLKNO);
} }
@ -789,8 +789,8 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
* Remember the parent of each new child page in our parent map. * Remember the parent of each new child page in our parent map.
* This assumes that the downlinks fit on the parent page. If the * This assumes that the downlinks fit on the parent page. If the
* parent page is split, too, when we recurse up to insert the * parent page is split, too, when we recurse up to insert the
* downlinks, the recursive gistbufferinginserttuples() call * downlinks, the recursive gistbufferinginserttuples() call will
* will update the map again. * update the map again.
*/ */
if (level > 0) if (level > 0)
gistMemorizeParent(buildstate, gistMemorizeParent(buildstate,
@ -881,6 +881,7 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate,
{ {
ItemId iid = PageGetItemId(page, *downlinkoffnum); ItemId iid = PageGetItemId(page, *downlinkoffnum);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == childblkno) if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == childblkno)
{ {
/* Still there */ /* Still there */
@ -889,16 +890,17 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate,
} }
/* /*
* Downlink was not at the offset where it used to be. Scan the page * Downlink was not at the offset where it used to be. Scan the page to
* to find it. During normal gist insertions, it might've moved to another * find it. During normal gist insertions, it might've moved to another
* page, to the right, but during a buffering build, we keep track of * page, to the right, but during a buffering build, we keep track of the
* the parent of each page in the lookup table so we should always know * parent of each page in the lookup table so we should always know what
* what page it's on. * page it's on.
*/ */
for (off = FirstOffsetNumber; off <= maxoff; off = OffsetNumberNext(off)) for (off = FirstOffsetNumber; off <= maxoff; off = OffsetNumberNext(off))
{ {
ItemId iid = PageGetItemId(page, off); ItemId iid = PageGetItemId(page, off);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == childblkno) if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == childblkno)
{ {
/* yes!!, found it */ /* yes!!, found it */
@ -1181,6 +1183,7 @@ gistMemorizeAllDownlinks(GISTBuildState *buildstate, Buffer parentbuf)
ItemId iid = PageGetItemId(page, off); ItemId iid = PageGetItemId(page, off);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
BlockNumber childblkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid)); BlockNumber childblkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
gistMemorizeParent(buildstate, childblkno, parentblkno); gistMemorizeParent(buildstate, childblkno, parentblkno);
} }
} }
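
The "parent map" these comments keep referring to is, in essence, a lookup table from child block number to parent block number that the buffering build keeps current as pages split. Purely as an illustration of that idea (not the actual gistMemorizeParent hash-table code), a minimal fixed-size open-addressing map over block numbers might look like this; the table size and sentinel value are assumptions for the sketch, and there is no growth or deletion.

    #include <stdint.h>
    #include <string.h>

    #define PARENT_MAP_SIZE 1024            /* power of two, illustrative only */
    #define INVALID_BLOCK   0xFFFFFFFFu     /* sentinel for an empty slot */

    typedef struct
    {
        uint32_t    child;
        uint32_t    parent;
    } ParentMapEntry;

    static ParentMapEntry parent_map[PARENT_MAP_SIZE];

    static void
    parent_map_init(void)
    {
        /* mark every slot empty */
        memset(parent_map, 0xFF, sizeof(parent_map));
    }

    /* Remember (or update) the parent of a child block; linear probing. */
    static void
    memorize_parent(uint32_t child, uint32_t parent)
    {
        uint32_t    i = child & (PARENT_MAP_SIZE - 1);

        while (parent_map[i].child != INVALID_BLOCK &&
               parent_map[i].child != child)
            i = (i + 1) & (PARENT_MAP_SIZE - 1);
        parent_map[i].child = child;
        parent_map[i].parent = parent;
    }

    /* Return the parent recorded for a child block, or INVALID_BLOCK. */
    static uint32_t
    lookup_parent(uint32_t child)
    {
        uint32_t    i = child & (PARENT_MAP_SIZE - 1);

        while (parent_map[i].child != INVALID_BLOCK)
        {
            if (parent_map[i].child == child)
                return parent_map[i].parent;
            i = (i + 1) & (PARENT_MAP_SIZE - 1);
        }
        return INVALID_BLOCK;
    }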


@ -581,8 +581,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *gist
if (v->spl_equiv == NULL) if (v->spl_equiv == NULL)
{ {
/* /*
* simple case: left and right keys for attno column are * simple case: left and right keys for attno column are equal
* equal
*/ */
gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno + 1); gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno + 1);
} }


@ -224,8 +224,8 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
/* /*
* Be sure to check for interrupts at least once per page. Checks at * Be sure to check for interrupts at least once per page. Checks at
* higher code levels won't be able to stop a seqscan that encounters * higher code levels won't be able to stop a seqscan that encounters many
* many pages' worth of consecutive dead tuples. * pages' worth of consecutive dead tuples.
*/ */
CHECK_FOR_INTERRUPTS(); CHECK_FOR_INTERRUPTS();
@ -1590,8 +1590,8 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
* When first_call is true (and thus, skip is initially false) we'll * When first_call is true (and thus, skip is initially false) we'll
* return the first tuple we find. But on later passes, heapTuple * return the first tuple we find. But on later passes, heapTuple
* will initially be pointing to the tuple we returned last time. * will initially be pointing to the tuple we returned last time.
* Returning it again would be incorrect (and would loop forever), * Returning it again would be incorrect (and would loop forever), so
* so we skip it and return the next match we find. * we skip it and return the next match we find.
*/ */
if (!skip) if (!skip)
{ {
@ -1885,14 +1885,14 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
heaptup = heap_prepare_insert(relation, tup, xid, cid, options); heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
/* /*
* We're about to do the actual insert -- but check for conflict first, * We're about to do the actual insert -- but check for conflict first, to
* to avoid possibly having to roll back work we've just done. * avoid possibly having to roll back work we've just done.
* *
* For a heap insert, we only need to check for table-level SSI locks. * For a heap insert, we only need to check for table-level SSI locks. Our
* Our new tuple can't possibly conflict with existing tuple locks, and * new tuple can't possibly conflict with existing tuple locks, and heap
* heap page locks are only consolidated versions of tuple locks; they do * page locks are only consolidated versions of tuple locks; they do not
* not lock "gaps" as index page locks do. So we don't need to identify * lock "gaps" as index page locks do. So we don't need to identify a
* a buffer before making the call. * buffer before making the call.
*/ */
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer); CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@ -2123,11 +2123,11 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
* We're about to do the actual inserts -- but check for conflict first, * We're about to do the actual inserts -- but check for conflict first,
* to avoid possibly having to roll back work we've just done. * to avoid possibly having to roll back work we've just done.
* *
* For a heap insert, we only need to check for table-level SSI locks. * For a heap insert, we only need to check for table-level SSI locks. Our
* Our new tuple can't possibly conflict with existing tuple locks, and * new tuple can't possibly conflict with existing tuple locks, and heap
* heap page locks are only consolidated versions of tuple locks; they do * page locks are only consolidated versions of tuple locks; they do not
* not lock "gaps" as index page locks do. So we don't need to identify * lock "gaps" as index page locks do. So we don't need to identify a
* a buffer before making the call. * buffer before making the call.
*/ */
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer); CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@ -2140,9 +2140,8 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
int nthispage; int nthispage;
/* /*
* Find buffer where at least the next tuple will fit. If the page * Find buffer where at least the next tuple will fit. If the page is
* is all-visible, this will also pin the requisite visibility map * all-visible, this will also pin the requisite visibility map page.
* page.
*/ */
buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len, buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
InvalidBuffer, options, bistate, InvalidBuffer, options, bistate,
@ -2372,10 +2371,10 @@ heap_delete(Relation relation, ItemPointer tid,
page = BufferGetPage(buffer); page = BufferGetPage(buffer);
/* /*
* Before locking the buffer, pin the visibility map page if it appears * Before locking the buffer, pin the visibility map page if it appears to
* to be necessary. Since we haven't got the lock yet, someone else might * be necessary. Since we haven't got the lock yet, someone else might be
* be in the middle of changing this, so we'll need to recheck after * in the middle of changing this, so we'll need to recheck after we have
* we have the lock. * the lock.
*/ */
if (PageIsAllVisible(page)) if (PageIsAllVisible(page))
visibilitymap_pin(relation, block, &vmbuffer); visibilitymap_pin(relation, block, &vmbuffer);
@ -2753,10 +2752,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
page = BufferGetPage(buffer); page = BufferGetPage(buffer);
/* /*
* Before locking the buffer, pin the visibility map page if it appears * Before locking the buffer, pin the visibility map page if it appears to
* to be necessary. Since we haven't got the lock yet, someone else might * be necessary. Since we haven't got the lock yet, someone else might be
* be in the middle of changing this, so we'll need to recheck after * in the middle of changing this, so we'll need to recheck after we have
* we have the lock. * the lock.
*/ */
if (PageIsAllVisible(page)) if (PageIsAllVisible(page))
visibilitymap_pin(relation, block, &vmbuffer); visibilitymap_pin(relation, block, &vmbuffer);
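
The pattern this comment (and the matching recheck further down, after l2:) describes is: do the cheap flag test and the possibly-I/O-bound pin while unlocked, then recheck under the lock and, if the flag changed during the window, drop the lock, pin, and retake it. A generic, self-contained illustration of the same shape, using a pthread mutex and plain flags as stand-ins rather than the real bufmgr/visibilitymap API, is sketched below; the names are hypothetical and the shared-flag handling is deliberately simplified.

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t buffer_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool page_all_visible = true;    /* stands in for PageIsAllVisible() */
    static bool vm_page_pinned = false;     /* our own pin, not shared state */

    static void
    pin_vm_page(void)
    {
        /* the real operation may perform read I/O, so it must not run while
         * the buffer lock is held */
        vm_page_pinned = true;
    }

    static void
    lock_buffer_with_vm_pin(void)
    {
        /* cheap check first, done without the lock */
        if (page_all_visible)
            pin_vm_page();

        pthread_mutex_lock(&buffer_lock);

        /* Recheck under the lock: the flag may have been set while we were
         * unlocked.  Never pin while holding the lock; release, pin, retake. */
        while (!vm_page_pinned && page_all_visible)
        {
            pthread_mutex_unlock(&buffer_lock);
            pin_vm_page();
            pthread_mutex_lock(&buffer_lock);
        }

        /* caller proceeds with the buffer locked and, whenever the page still
         * appears all-visible, the visibility-map page pinned */
    }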
@ -2900,11 +2899,11 @@ l2:
/* /*
* If we didn't pin the visibility map page and the page has become all * If we didn't pin the visibility map page and the page has become all
* visible while we were busy locking the buffer, or during some subsequent * visible while we were busy locking the buffer, or during some
* window during which we had it unlocked, we'll have to unlock and * subsequent window during which we had it unlocked, we'll have to unlock
* re-lock, to avoid holding the buffer lock across an I/O. That's a bit * and re-lock, to avoid holding the buffer lock across an I/O. That's a
* unfortunate, esepecially since we'll now have to recheck whether the * bit unfortunate, esepecially since we'll now have to recheck whether
* tuple has been locked or updated under us, but hopefully it won't * the tuple has been locked or updated under us, but hopefully it won't
* happen very often. * happen very often.
*/ */
if (vmbuffer == InvalidBuffer && PageIsAllVisible(page)) if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
@ -3196,11 +3195,11 @@ l2:
/* /*
* Mark old tuple for invalidation from system caches at next command * Mark old tuple for invalidation from system caches at next command
* boundary, and mark the new tuple for invalidation in case we abort. * boundary, and mark the new tuple for invalidation in case we abort. We
* We have to do this before releasing the buffer because oldtup is in * have to do this before releasing the buffer because oldtup is in the
* the buffer. (heaptup is all in local memory, but it's necessary to * buffer. (heaptup is all in local memory, but it's necessary to process
* process both tuple versions in one call to inval.c so we can avoid * both tuple versions in one call to inval.c so we can avoid redundant
* redundant sinval messages.) * sinval messages.)
*/ */
CacheInvalidateHeapTuple(relation, &oldtup, heaptup); CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
@ -4731,17 +4730,16 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/* /*
* We don't bump the LSN of the heap page when setting the visibility * We don't bump the LSN of the heap page when setting the visibility map
* map bit, because that would generate an unworkable volume of * bit, because that would generate an unworkable volume of full-page
* full-page writes. This exposes us to torn page hazards, but since * writes. This exposes us to torn page hazards, but since we're not
* we're not inspecting the existing page contents in any way, we * inspecting the existing page contents in any way, we don't care.
* don't care.
* *
* However, all operations that clear the visibility map bit *do* bump * However, all operations that clear the visibility map bit *do* bump the
* the LSN, and those operations will only be replayed if the XLOG LSN * LSN, and those operations will only be replayed if the XLOG LSN follows
* follows the page LSN. Thus, if the page LSN has advanced past our * the page LSN. Thus, if the page LSN has advanced past our XLOG
* XLOG record's LSN, we mustn't mark the page all-visible, because * record's LSN, we mustn't mark the page all-visible, because the
* the subsequent update won't be replayed to clear the flag. * subsequent update won't be replayed to clear the flag.
*/ */
if (!XLByteLE(lsn, PageGetLSN(page))) if (!XLByteLE(lsn, PageGetLSN(page)))
{ {
@ -4772,10 +4770,10 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
* Don't set the bit if replay has already passed this point. * Don't set the bit if replay has already passed this point.
* *
* It might be safe to do this unconditionally; if replay has past * It might be safe to do this unconditionally; if replay has past
* this point, we'll replay at least as far this time as we did before, * this point, we'll replay at least as far this time as we did
* and if this bit needs to be cleared, the record responsible for * before, and if this bit needs to be cleared, the record responsible
* doing so should be again replayed, and clear it. For right now, * for doing so should be again replayed, and clear it. For right
* out of an abundance of conservatism, we use the same test here * now, out of an abundance of conservatism, we use the same test here
* we did for the heap page; if this results in a dropped bit, no real * we did for the heap page; if this results in a dropped bit, no real
* harm is done; and the next VACUUM will fix it. * harm is done; and the next VACUUM will fix it.
*/ */


@ -302,11 +302,11 @@ RelationGetBufferForTuple(Relation relation, Size len,
* block if one was given, taking suitable care with lock ordering and * block if one was given, taking suitable care with lock ordering and
* the possibility they are the same block. * the possibility they are the same block.
* *
* If the page-level all-visible flag is set, caller will need to clear * If the page-level all-visible flag is set, caller will need to
* both that and the corresponding visibility map bit. However, by the * clear both that and the corresponding visibility map bit. However,
* time we return, we'll have x-locked the buffer, and we don't want to * by the time we return, we'll have x-locked the buffer, and we don't
* do any I/O while in that state. So we check the bit here before * want to do any I/O while in that state. So we check the bit here
* taking the lock, and pin the page if it appears necessary. * before taking the lock, and pin the page if it appears necessary.
* Checking without the lock creates a risk of getting the wrong * Checking without the lock creates a risk of getting the wrong
* answer, so we'll have to recheck after acquiring the lock. * answer, so we'll have to recheck after acquiring the lock.
*/ */
@ -348,22 +348,23 @@ RelationGetBufferForTuple(Relation relation, Size len,
/* /*
* We now have the target page (and the other buffer, if any) pinned * We now have the target page (and the other buffer, if any) pinned
* and locked. However, since our initial PageIsAllVisible checks * and locked. However, since our initial PageIsAllVisible checks
* were performed before acquiring the lock, the results might now * were performed before acquiring the lock, the results might now be
* be out of date, either for the selected victim buffer, or for the * out of date, either for the selected victim buffer, or for the
* other buffer passed by the caller. In that case, we'll need to give * other buffer passed by the caller. In that case, we'll need to
* up our locks, go get the pin(s) we failed to get earlier, and * give up our locks, go get the pin(s) we failed to get earlier, and
* re-lock. That's pretty painful, but hopefully shouldn't happen * re-lock. That's pretty painful, but hopefully shouldn't happen
* often. * often.
* *
* Note that there's a small possibility that we didn't pin the * Note that there's a small possibility that we didn't pin the page
* page above but still have the correct page pinned anyway, either * above but still have the correct page pinned anyway, either because
* because we've already made a previous pass through this loop, or * we've already made a previous pass through this loop, or because
* because caller passed us the right page anyway. * caller passed us the right page anyway.
* *
* Note also that it's possible that by the time we get the pin and * Note also that it's possible that by the time we get the pin and
* retake the buffer locks, the visibility map bit will have been * retake the buffer locks, the visibility map bit will have been
* cleared by some other backend anyway. In that case, we'll have done * cleared by some other backend anyway. In that case, we'll have
* a bit of extra work for no gain, but there's no real harm done. * done a bit of extra work for no gain, but there's no real harm
* done.
*/ */
if (otherBuffer == InvalidBuffer || buffer <= otherBuffer) if (otherBuffer == InvalidBuffer || buffer <= otherBuffer)
GetVisibilityMapPins(relation, buffer, otherBuffer, GetVisibilityMapPins(relation, buffer, otherBuffer,


@ -75,7 +75,7 @@ do { \
static void toast_delete_datum(Relation rel, Datum value); static void toast_delete_datum(Relation rel, Datum value);
static Datum toast_save_datum(Relation rel, Datum value, static Datum toast_save_datum(Relation rel, Datum value,
struct varlena *oldexternal, int options); struct varlena * oldexternal, int options);
static bool toastrel_valueid_exists(Relation toastrel, Oid valueid); static bool toastrel_valueid_exists(Relation toastrel, Oid valueid);
static bool toastid_valueid_exists(Oid toastrelid, Oid valueid); static bool toastid_valueid_exists(Oid toastrelid, Oid valueid);
static struct varlena *toast_fetch_datum(struct varlena * attr); static struct varlena *toast_fetch_datum(struct varlena * attr);
@ -1233,7 +1233,7 @@ toast_compress_datum(Datum value)
*/ */
static Datum static Datum
toast_save_datum(Relation rel, Datum value, toast_save_datum(Relation rel, Datum value,
struct varlena *oldexternal, int options) struct varlena * oldexternal, int options)
{ {
Relation toastrel; Relation toastrel;
Relation toastidx; Relation toastidx;


@ -356,16 +356,16 @@ visibilitymap_count(Relation rel)
BlockNumber result = 0; BlockNumber result = 0;
BlockNumber mapBlock; BlockNumber mapBlock;
for (mapBlock = 0; ; mapBlock++) for (mapBlock = 0;; mapBlock++)
{ {
Buffer mapBuffer; Buffer mapBuffer;
unsigned char *map; unsigned char *map;
int i; int i;
/* /*
* Read till we fall off the end of the map. We assume that any * Read till we fall off the end of the map. We assume that any extra
* extra bytes in the last page are zeroed, so we don't bother * bytes in the last page are zeroed, so we don't bother excluding
* excluding them from the count. * them from the count.
*/ */
mapBuffer = vm_readbuf(rel, mapBlock, false); mapBuffer = vm_readbuf(rel, mapBlock, false);
if (!BufferIsValid(mapBuffer)) if (!BufferIsValid(mapBuffer))
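
Stripped of the buffer management, what this loop computes is the total number of set bits across however many visibility-map pages exist, relying on unused trailing bytes being zero so they can be counted harmlessly. A standalone sketch of just that counting step (the page-size handling and names are illustrative, not the real visibilitymap code):

    #include <stdint.h>
    #include <stddef.h>

    /* Count set bits in one map page's worth of bytes.  Because bytes past the
     * end of the relation are zeroed, including them adds nothing to the
     * count, which is why the real code does not bother excluding them. */
    static uint64_t
    count_visible_bits(const unsigned char *map, size_t map_bytes)
    {
        uint64_t    count = 0;
        size_t      i;

        for (i = 0; i < map_bytes; i++)
        {
            unsigned char b = map[i];

            while (b)
            {
                count += b & 1;
                b >>= 1;
            }
        }
        return count;
    }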
@ -496,11 +496,11 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
Buffer buf; Buffer buf;
/* /*
* We might not have opened the relation at the smgr level yet, or we might * We might not have opened the relation at the smgr level yet, or we
* have been forced to close it by a sinval message. The code below won't * might have been forced to close it by a sinval message. The code below
* necessarily notice relation extension immediately when extend = false, * won't necessarily notice relation extension immediately when extend =
* so we rely on sinval messages to ensure that our ideas about the size of * false, so we rely on sinval messages to ensure that our ideas about the
* the map aren't too far out of date. * size of the map aren't too far out of date.
*/ */
RelationOpenSmgr(rel); RelationOpenSmgr(rel);


@ -530,8 +530,8 @@ index_fetch_heap(IndexScanDesc scan)
if (got_heap_tuple) if (got_heap_tuple)
{ {
/* /*
* Only in a non-MVCC snapshot can more than one member of the * Only in a non-MVCC snapshot can more than one member of the HOT
* HOT chain be visible. * chain be visible.
*/ */
scan->xs_continue_hot = !IsMVCCSnapshot(scan->xs_snapshot); scan->xs_continue_hot = !IsMVCCSnapshot(scan->xs_snapshot);
pgstat_count_heap_fetch(scan->indexRelation); pgstat_count_heap_fetch(scan->indexRelation);


@ -227,8 +227,8 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
} }
/* /*
* Make a scan-lifespan context to hold array-associated data, or reset * Make a scan-lifespan context to hold array-associated data, or reset it
* it if we already have one from a previous rescan cycle. * if we already have one from a previous rescan cycle.
*/ */
if (so->arrayContext == NULL) if (so->arrayContext == NULL)
so->arrayContext = AllocSetContextCreate(CurrentMemoryContext, so->arrayContext = AllocSetContextCreate(CurrentMemoryContext,
@ -387,9 +387,10 @@ _bt_find_extreme_element(IndexScanDesc scan, ScanKey skey,
/* /*
* Look up the appropriate comparison operator in the opfamily. * Look up the appropriate comparison operator in the opfamily.
* *
* Note: it's possible that this would fail, if the opfamily is incomplete, * Note: it's possible that this would fail, if the opfamily is
* but it seems quite unlikely that an opfamily would omit non-cross-type * incomplete, but it seems quite unlikely that an opfamily would omit
* comparison operators for any datatype that it supports at all. * non-cross-type comparison operators for any datatype that it supports
* at all.
*/ */
cmp_op = get_opfamily_member(rel->rd_opfamily[skey->sk_attno - 1], cmp_op = get_opfamily_member(rel->rd_opfamily[skey->sk_attno - 1],
elemtype, elemtype,
@ -455,9 +456,10 @@ _bt_sort_array_elements(IndexScanDesc scan, ScanKey skey,
/* /*
* Look up the appropriate comparison function in the opfamily. * Look up the appropriate comparison function in the opfamily.
* *
* Note: it's possible that this would fail, if the opfamily is incomplete, * Note: it's possible that this would fail, if the opfamily is
* but it seems quite unlikely that an opfamily would omit non-cross-type * incomplete, but it seems quite unlikely that an opfamily would omit
* support functions for any datatype that it supports at all. * non-cross-type support functions for any datatype that it supports at
* all.
*/ */
cmp_proc = get_opfamily_proc(rel->rd_opfamily[skey->sk_attno - 1], cmp_proc = get_opfamily_proc(rel->rd_opfamily[skey->sk_attno - 1],
elemtype, elemtype,
@ -781,8 +783,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
* set qual_ok to false and abandon further processing. * set qual_ok to false and abandon further processing.
* *
* We also have to deal with the case of "key IS NULL", which is * We also have to deal with the case of "key IS NULL", which is
* unsatisfiable in combination with any other index condition. * unsatisfiable in combination with any other index condition. By
* By the time we get here, that's been classified as an equality * the time we get here, that's been classified as an equality
* check, and we've rejected any combination of it with a regular * check, and we've rejected any combination of it with a regular
* equality condition; but not with other types of conditions. * equality condition; but not with other types of conditions.
*/ */
@ -1424,9 +1426,9 @@ _bt_checkkeys(IndexScanDesc scan,
* index attr. On a backward scan, we can stop if this qual * index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless * is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required, * of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. * because it's not possible for any future tuples to pass. On
* On a forward scan, however, we must keep going, because we * a forward scan, however, we must keep going, because we may
* may have initially positioned to the start of the index. * have initially positioned to the start of the index.
*/ */
if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) && if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
ScanDirectionIsBackward(dir)) ScanDirectionIsBackward(dir))
@ -1440,8 +1442,8 @@ _bt_checkkeys(IndexScanDesc scan,
* index attr. On a forward scan, we can stop if this qual is * index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. We can stop regardless of * one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required, * whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. * because it's not possible for any future tuples to pass. On
* On a backward scan, however, we must keep going, because we * a backward scan, however, we must keep going, because we
* may have initially positioned to the end of the index. * may have initially positioned to the end of the index.
*/ */
if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) && if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
@ -1535,9 +1537,9 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* index attr. On a backward scan, we can stop if this qual * index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless * is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required, * of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. * because it's not possible for any future tuples to pass. On
* On a forward scan, however, we must keep going, because we * a forward scan, however, we must keep going, because we may
* may have initially positioned to the start of the index. * have initially positioned to the start of the index.
*/ */
if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) && if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
ScanDirectionIsBackward(dir)) ScanDirectionIsBackward(dir))
@ -1551,8 +1553,8 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* index attr. On a forward scan, we can stop if this qual is * index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. We can stop regardless of * one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required, * whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. * because it's not possible for any future tuples to pass. On
* On a backward scan, however, we must keep going, because we * a backward scan, however, we must keep going, because we
* may have initially positioned to the end of the index. * may have initially positioned to the end of the index.
*/ */
if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) && if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&


@ -250,9 +250,9 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
else else
{ {
/* /*
* Tuple must be inserted into existing chain. We mustn't change * Tuple must be inserted into existing chain. We mustn't change the
* the chain's head address, but we don't need to chase the entire * chain's head address, but we don't need to chase the entire chain
* chain to put the tuple at the end; we can insert it second. * to put the tuple at the end; we can insert it second.
* *
* Also, it's possible that the "chain" consists only of a DEAD tuple, * Also, it's possible that the "chain" consists only of a DEAD tuple,
* in which case we should replace the DEAD tuple in-place. * in which case we should replace the DEAD tuple in-place.
@ -516,9 +516,9 @@ moveLeafs(Relation index, SpGistState *state,
leafptr += newLeafTuple->size; leafptr += newLeafTuple->size;
/* /*
* Now delete the old tuples, leaving a redirection pointer behind for * Now delete the old tuples, leaving a redirection pointer behind for the
* the first one, unless we're doing an index build; in which case there * first one, unless we're doing an index build; in which case there can't
* can't be any concurrent scan so we need not provide a redirect. * be any concurrent scan so we need not provide a redirect.
*/ */
spgPageIndexMultiDelete(state, current->page, toDelete, nDelete, spgPageIndexMultiDelete(state, current->page, toDelete, nDelete,
state->isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT, state->isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
@ -754,8 +754,8 @@ doPickSplit(Relation index, SpGistState *state,
{ {
/* /*
* We are splitting the root (which up to now is also a leaf page). * We are splitting the root (which up to now is also a leaf page).
* Its tuples are not linked, so scan sequentially to get them all. * Its tuples are not linked, so scan sequentially to get them all. We
* We ignore the original value of current->offnum. * ignore the original value of current->offnum.
*/ */
for (i = FirstOffsetNumber; i <= max; i++) for (i = FirstOffsetNumber; i <= max; i++)
{ {
@ -924,8 +924,8 @@ doPickSplit(Relation index, SpGistState *state,
innerTuple->allTheSame = allTheSame; innerTuple->allTheSame = allTheSame;
/* /*
* Update nodes[] array to point into the newly formed innerTuple, so * Update nodes[] array to point into the newly formed innerTuple, so that
* that we can adjust their downlinks below. * we can adjust their downlinks below.
*/ */
SGITITERATE(innerTuple, i, node) SGITITERATE(innerTuple, i, node)
{ {
@ -944,13 +944,13 @@ doPickSplit(Relation index, SpGistState *state,
} }
/* /*
* To perform the split, we must insert a new inner tuple, which can't * To perform the split, we must insert a new inner tuple, which can't go
* go on a leaf page; and unless we are splitting the root page, we * on a leaf page; and unless we are splitting the root page, we must then
* must then update the parent tuple's downlink to point to the inner * update the parent tuple's downlink to point to the inner tuple. If
* tuple. If there is room, we'll put the new inner tuple on the same * there is room, we'll put the new inner tuple on the same page as the
* page as the parent tuple, otherwise we need another non-leaf buffer. * parent tuple, otherwise we need another non-leaf buffer. But if the
* But if the parent page is the root, we can't add the new inner tuple * parent page is the root, we can't add the new inner tuple there,
* there, because the root page must have only one inner tuple. * because the root page must have only one inner tuple.
*/ */
xlrec.initInner = false; xlrec.initInner = false;
if (parent->buffer != InvalidBuffer && if (parent->buffer != InvalidBuffer &&
@ -977,22 +977,22 @@ doPickSplit(Relation index, SpGistState *state,
} }
/* /*
* Because a WAL record can't involve more than four buffers, we can * Because a WAL record can't involve more than four buffers, we can only
* only afford to deal with two leaf pages in each picksplit action, * afford to deal with two leaf pages in each picksplit action, ie the
* ie the current page and at most one other. * current page and at most one other.
* *
* The new leaf tuples converted from the existing ones should require * The new leaf tuples converted from the existing ones should require the
* the same or less space, and therefore should all fit onto one page * same or less space, and therefore should all fit onto one page
* (although that's not necessarily the current page, since we can't * (although that's not necessarily the current page, since we can't
* delete the old tuples but only replace them with placeholders). * delete the old tuples but only replace them with placeholders).
* However, the incoming new tuple might not also fit, in which case * However, the incoming new tuple might not also fit, in which case we
* we might need another picksplit cycle to reduce it some more. * might need another picksplit cycle to reduce it some more.
* *
* If there's not room to put everything back onto the current page, * If there's not room to put everything back onto the current page, then
* then we decide on a per-node basis which tuples go to the new page. * we decide on a per-node basis which tuples go to the new page. (We do
* (We do it like that because leaf tuple chains can't cross pages, * it like that because leaf tuple chains can't cross pages, so we must
* so we must place all leaf tuples belonging to the same parent node * place all leaf tuples belonging to the same parent node on the same
* on the same page.) * page.)
* *
* If we are splitting the root page (turning it from a leaf page into an * If we are splitting the root page (turning it from a leaf page into an
* inner page), then no leaf tuples can go back to the current page; they * inner page), then no leaf tuples can go back to the current page; they
@ -1041,6 +1041,7 @@ doPickSplit(Relation index, SpGistState *state,
Min(totalLeafSizes, Min(totalLeafSizes,
SPGIST_PAGE_CAPACITY), SPGIST_PAGE_CAPACITY),
&xlrec.initDest); &xlrec.initDest);
/* /*
* Attempt to assign node groups to the two pages. We might fail to * Attempt to assign node groups to the two pages. We might fail to
* do so, even if totalLeafSizes is less than the available space, * do so, even if totalLeafSizes is less than the available space,
@ -1584,8 +1585,8 @@ spgAddNodeAction(Relation index, SpGistState *state,
xlrec.nodeI = parent->node; xlrec.nodeI = parent->node;
/* /*
* obtain new buffer with the same parity as current, since it will * obtain new buffer with the same parity as current, since it will be
* be a child of same parent tuple * a child of same parent tuple
*/ */
current->buffer = SpGistGetBuffer(index, current->buffer = SpGistGetBuffer(index,
GBUF_INNER_PARITY(current->blkno), GBUF_INNER_PARITY(current->blkno),
@ -1597,12 +1598,12 @@ spgAddNodeAction(Relation index, SpGistState *state,
xlrec.blknoNew = current->blkno; xlrec.blknoNew = current->blkno;
/* /*
* Let's just make real sure new current isn't same as old. Right * Let's just make real sure new current isn't same as old. Right now
* now that's impossible, but if SpGistGetBuffer ever got smart enough * that's impossible, but if SpGistGetBuffer ever got smart enough to
* to delete placeholder tuples before checking space, maybe it * delete placeholder tuples before checking space, maybe it wouldn't
* wouldn't be impossible. The case would appear to work except that * be impossible. The case would appear to work except that WAL
* WAL replay would be subtly wrong, so I think a mere assert isn't * replay would be subtly wrong, so I think a mere assert isn't enough
* enough here. * here.
*/ */
if (xlrec.blknoNew == xlrec.blkno) if (xlrec.blknoNew == xlrec.blkno)
elog(ERROR, "SPGiST new buffer shouldn't be same as old buffer"); elog(ERROR, "SPGiST new buffer shouldn't be same as old buffer");
@ -1707,9 +1708,9 @@ spgSplitNodeAction(Relation index, SpGistState *state,
Assert(!SpGistPageStoresNulls(current->page)); Assert(!SpGistPageStoresNulls(current->page));
/* /*
* Construct new prefix tuple, containing a single node with the * Construct new prefix tuple, containing a single node with the specified
* specified label. (We'll update the node's downlink to point to the * label. (We'll update the node's downlink to point to the new postfix
* new postfix tuple, below.) * tuple, below.)
*/ */
node = spgFormNodeTuple(state, out->result.splitTuple.nodeLabel, false); node = spgFormNodeTuple(state, out->result.splitTuple.nodeLabel, false);
@ -2077,8 +2078,8 @@ spgdoinsert(Relation index, SpGistState *state,
} }
/* /*
* Loop around and attempt to insert the new leafDatum * Loop around and attempt to insert the new leafDatum at
* at "current" (which might reference an existing child * "current" (which might reference an existing child
* tuple, or might be invalid to force us to find a new * tuple, or might be invalid to force us to find a new
* page for the tuple). * page for the tuple).
* *
@ -2102,8 +2103,8 @@ spgdoinsert(Relation index, SpGistState *state,
out.result.addNode.nodeLabel); out.result.addNode.nodeLabel);
/* /*
* Retry insertion into the enlarged node. We assume * Retry insertion into the enlarged node. We assume that
* that we'll get a MatchNode result this time. * we'll get a MatchNode result this time.
*/ */
goto process_inner_tuple; goto process_inner_tuple;
break; break;


@ -135,12 +135,12 @@ spg_kd_picksplit(PG_FUNCTION_ARGS)
/* /*
* Note: points that have coordinates exactly equal to coord may get * Note: points that have coordinates exactly equal to coord may get
* classified into either node, depending on where they happen to fall * classified into either node, depending on where they happen to fall in
* in the sorted list. This is okay as long as the inner_consistent * the sorted list. This is okay as long as the inner_consistent function
* function descends into both sides for such cases. This is better * descends into both sides for such cases. This is better than the
* than the alternative of trying to have an exact boundary, because * alternative of trying to have an exact boundary, because it keeps the
* it keeps the tree balanced even when we have many instances of the * tree balanced even when we have many instances of the same point value.
* same point value. So we should never trigger the allTheSame logic. * So we should never trigger the allTheSame logic.
*/ */
for (i = 0; i < in->nTuples; i++) for (i = 0; i < in->nTuples; i++)
{ {


@ -177,6 +177,7 @@ spgbeginscan(PG_FUNCTION_ARGS)
{ {
Relation rel = (Relation) PG_GETARG_POINTER(0); Relation rel = (Relation) PG_GETARG_POINTER(0);
int keysz = PG_GETARG_INT32(1); int keysz = PG_GETARG_INT32(1);
/* ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2); */ /* ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2); */
IndexScanDesc scan; IndexScanDesc scan;
SpGistScanOpaque so; SpGistScanOpaque so;


@ -209,9 +209,9 @@ spg_text_choose(PG_FUNCTION_ARGS)
{ {
/* /*
* Descend to existing node. (If in->allTheSame, the core code will * Descend to existing node. (If in->allTheSame, the core code will
* ignore our nodeN specification here, but that's OK. We still * ignore our nodeN specification here, but that's OK. We still have
* have to provide the correct levelAdd and restDatum values, and * to provide the correct levelAdd and restDatum values, and those are
* those are the same regardless of which node gets chosen by core.) * the same regardless of which node gets chosen by core.)
*/ */
out->resultType = spgMatchNode; out->resultType = spgMatchNode;
out->result.matchNode.nodeN = i; out->result.matchNode.nodeN = i;
@ -227,10 +227,10 @@ spg_text_choose(PG_FUNCTION_ARGS)
else if (in->allTheSame) else if (in->allTheSame)
{ {
/* /*
* Can't use AddNode action, so split the tuple. The upper tuple * Can't use AddNode action, so split the tuple. The upper tuple has
* has the same prefix as before and uses an empty node label for * the same prefix as before and uses an empty node label for the
* the lower tuple. The lower tuple has no prefix and the same * lower tuple. The lower tuple has no prefix and the same node
* node labels as the original tuple. * labels as the original tuple.
*/ */
out->resultType = spgSplitTuple; out->resultType = spgSplitTuple;
out->result.splitTuple.prefixHasPrefix = in->hasPrefix; out->result.splitTuple.prefixHasPrefix = in->hasPrefix;


@ -276,8 +276,8 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
else if (prevLive == InvalidOffsetNumber) else if (prevLive == InvalidOffsetNumber)
{ {
/* /*
* This is the first live tuple in the chain. It has * This is the first live tuple in the chain. It has to move
* to move to the head position. * to the head position.
*/ */
moveSrc[xlrec.nMove] = j; moveSrc[xlrec.nMove] = j;
moveDest[xlrec.nMove] = i; moveDest[xlrec.nMove] = i;
@ -353,11 +353,11 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
InvalidBlockNumber, InvalidOffsetNumber); InvalidBlockNumber, InvalidOffsetNumber);
/* /*
* We implement the move step by swapping the item pointers of the * We implement the move step by swapping the item pointers of the source
* source and target tuples, then replacing the newly-source tuples * and target tuples, then replacing the newly-source tuples with
* with placeholders. This is perhaps unduly friendly with the page * placeholders. This is perhaps unduly friendly with the page data
* data representation, but it's fast and doesn't risk page overflow * representation, but it's fast and doesn't risk page overflow when a
* when a tuple to be relocated is large. * tuple to be relocated is large.
*/ */
for (i = 0; i < xlrec.nMove; i++) for (i = 0; i < xlrec.nMove; i++)
{ {
@ -651,9 +651,9 @@ spgvacuumpage(spgBulkDeleteState *bds, BlockNumber blkno)
/* /*
* The root pages must never be deleted, nor marked as available in FSM, * The root pages must never be deleted, nor marked as available in FSM,
* because we don't want them ever returned by a search for a place to * because we don't want them ever returned by a search for a place to put
* put a new tuple. Otherwise, check for empty/deletable page, and * a new tuple. Otherwise, check for empty/deletable page, and make sure
* make sure FSM knows about it. * FSM knows about it.
*/ */
if (!SpGistBlockIsRoot(blkno)) if (!SpGistBlockIsRoot(blkno))
{ {
@ -741,11 +741,11 @@ spgprocesspending(spgBulkDeleteState *bds)
else else
{ {
/* /*
* On an inner page, visit the referenced inner tuple and add * On an inner page, visit the referenced inner tuple and add all
* all its downlinks to the pending list. We might have pending * its downlinks to the pending list. We might have pending items
* items for more than one inner tuple on the same page (in fact * for more than one inner tuple on the same page (in fact this is
* this is pretty likely given the way space allocation works), * pretty likely given the way space allocation works), so get
* so get them all while we are here. * them all while we are here.
*/ */
for (nitem = pitem; nitem != NULL; nitem = nitem->next) for (nitem = pitem; nitem != NULL; nitem = nitem->next)
{ {
@ -825,8 +825,8 @@ spgvacuumscan(spgBulkDeleteState *bds)
* physical order (we hope the kernel will cooperate in providing * physical order (we hope the kernel will cooperate in providing
* read-ahead for speed). It is critical that we visit all leaf pages, * read-ahead for speed). It is critical that we visit all leaf pages,
* including ones added after we start the scan, else we might fail to * including ones added after we start the scan, else we might fail to
* delete some deletable tuples. See more extensive comments about * delete some deletable tuples. See more extensive comments about this
* this in btvacuumscan(). * in btvacuumscan().
*/ */
blkno = SPGIST_METAPAGE_BLKNO + 1; blkno = SPGIST_METAPAGE_BLKNO + 1;
for (;;) for (;;)


@ -587,8 +587,8 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
{ {
/* /*
* We have it a bit easier here than in doPickSplit(), * We have it a bit easier here than in doPickSplit(),
* because we know the inner tuple's location already, * because we know the inner tuple's location already, so
* so we can inject the correct redirection tuple now. * we can inject the correct redirection tuple now.
*/ */
if (!state.isBuild) if (!state.isBuild)
spgPageIndexMultiDelete(&state, page, spgPageIndexMultiDelete(&state, page,


@ -927,8 +927,8 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
* other choice: a read-busy slot will not be least recently used once * other choice: a read-busy slot will not be least recently used once
* the read finishes, and waiting for an I/O on a write-busy slot is * the read finishes, and waiting for an I/O on a write-busy slot is
* inferior to just picking some other slot. Testing shows the slot * inferior to just picking some other slot. Testing shows the slot
* we pick instead will often be clean, allowing us to begin a read * we pick instead will often be clean, allowing us to begin a read at
* at once. * once.
* *
* Normally the page_lru_count values will all be different and so * Normally the page_lru_count values will all be different and so
* there will be a well-defined LRU page. But since we allow * there will be a well-defined LRU page. But since we allow
@ -997,10 +997,10 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/* /*
* If all pages (except possibly the latest one) are I/O busy, we'll * If all pages (except possibly the latest one) are I/O busy, we'll
* have to wait for an I/O to complete and then retry. In that unhappy * have to wait for an I/O to complete and then retry. In that
* case, we choose to wait for the I/O on the least recently used slot, * unhappy case, we choose to wait for the I/O on the least recently
* on the assumption that it was likely initiated first of all the I/Os * used slot, on the assumption that it was likely initiated first of
* in progress and may therefore finish first. * all the I/Os in progress and may therefore finish first.
*/ */
if (best_valid_delta < 0) if (best_valid_delta < 0)
{ {
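
Reduced to its core, the victim selection described in these comments walks every slot, prefers the least recently used slot that is not I/O-busy, and only if every candidate is busy falls back to waiting on the least recently used busy slot, on the theory that its I/O probably started first and will finish first. A simplified, self-contained sketch of that policy follows; it is not the real SlruSelectLRUPage, which also special-cases empty slots and the latest page, and the structure fields are assumptions for the sketch.

    #include <stdbool.h>

    typedef struct
    {
        bool        io_in_progress;   /* slot is being read or written now */
        int         page_lru_count;   /* copy of cur_lru_count at last access */
    } SlruSlot;

    /* Return the preferred victim slot index, or -1 if nslots is zero. */
    static int
    select_victim_slot(const SlruSlot *slots, int nslots, int cur_lru_count)
    {
        int         best_idle = -1, best_idle_age = -1;
        int         best_busy = -1, best_busy_age = -1;
        int         i;

        for (i = 0; i < nslots; i++)
        {
            int         age = cur_lru_count - slots[i].page_lru_count;

            if (!slots[i].io_in_progress)
            {
                /* least recently used among slots we can take immediately */
                if (age > best_idle_age)
                {
                    best_idle = i;
                    best_idle_age = age;
                }
            }
            else if (age > best_busy_age)
            {
                /* remember the oldest busy slot as a fallback to wait on */
                best_busy = i;
                best_busy_age = age;
            }
        }
        return (best_idle >= 0) ? best_idle : best_busy;
    }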


@ -362,6 +362,7 @@ GXactLoadSubxactData(GlobalTransaction gxact, int nsubxacts,
{ {
PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno]; PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno]; PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
/* We need no extra lock since the GXACT isn't valid yet */ /* We need no extra lock since the GXACT isn't valid yet */
if (nsubxacts > PGPROC_MAX_CACHED_SUBXIDS) if (nsubxacts > PGPROC_MAX_CACHED_SUBXIDS)
{ {
@ -1063,9 +1064,9 @@ EndPrepare(GlobalTransaction gxact)
errmsg("could not close two-phase state file: %m"))); errmsg("could not close two-phase state file: %m")));
/* /*
* Mark the prepared transaction as valid. As soon as xact.c marks MyPgXact * Mark the prepared transaction as valid. As soon as xact.c marks
* as not running our XID (which it will do immediately after this * MyPgXact as not running our XID (which it will do immediately after
* function returns), others can commit/rollback the xact. * this function returns), others can commit/rollback the xact.
* *
* NB: a side effect of this is to make a dummy ProcArray entry for the * NB: a side effect of this is to make a dummy ProcArray entry for the
* prepared XID. This must happen before we clear the XID from MyPgXact, * prepared XID. This must happen before we clear the XID from MyPgXact,


@ -174,8 +174,8 @@ GetNewTransactionId(bool isSubXact)
* latestCompletedXid is present in the ProcArray, which is essential for * latestCompletedXid is present in the ProcArray, which is essential for
* correct OldestXmin tracking; see src/backend/access/transam/README. * correct OldestXmin tracking; see src/backend/access/transam/README.
* *
* XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we are * XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we
* relying on fetch/store of an xid to be atomic, else other backends * are relying on fetch/store of an xid to be atomic, else other backends
* might see a partially-set xid here. But holding both locks at once * might see a partially-set xid here. But holding both locks at once
* would be a nasty concurrency hit. So for now, assume atomicity. * would be a nasty concurrency hit. So for now, assume atomicity.
* *


@ -1019,6 +1019,7 @@ RecordTransactionCommit(void)
XLogRecData rdata[4]; XLogRecData rdata[4];
int lastrdata = 0; int lastrdata = 0;
xl_xact_commit xlrec; xl_xact_commit xlrec;
/* /*
* Set flags required for recovery processing of commits. * Set flags required for recovery processing of commits.
*/ */
@ -1074,6 +1075,7 @@ RecordTransactionCommit(void)
XLogRecData rdata[2]; XLogRecData rdata[2];
int lastrdata = 0; int lastrdata = 0;
xl_xact_commit_compact xlrec; xl_xact_commit_compact xlrec;
xlrec.xact_time = xactStopTimestamp; xlrec.xact_time = xactStopTimestamp;
xlrec.nsubxacts = nchildren; xlrec.nsubxacts = nchildren;
rdata[0].data = (char *) (&xlrec); rdata[0].data = (char *) (&xlrec);
@ -2602,10 +2604,10 @@ CommitTransactionCommand(void)
break; break;
/* /*
* We were issued a RELEASE command, so we end the * We were issued a RELEASE command, so we end the current
* current subtransaction and return to the parent transaction. * subtransaction and return to the parent transaction. The parent
* The parent might be ended too, so repeat till we find an * might be ended too, so repeat till we find an INPROGRESS
* INPROGRESS transaction or subtransaction. * transaction or subtransaction.
*/ */
case TBLOCK_SUBRELEASE: case TBLOCK_SUBRELEASE:
do do
@ -2623,9 +2625,9 @@ CommitTransactionCommand(void)
* hierarchy and perform final commit. We do this by rolling up * hierarchy and perform final commit. We do this by rolling up
* any subtransactions into their parent, which leads to O(N^2) * any subtransactions into their parent, which leads to O(N^2)
* operations with respect to resource owners - this isn't that * operations with respect to resource owners - this isn't that
* bad until we approach a thousands of savepoints but is necessary * bad until we approach a thousands of savepoints but is
* for correctness should after triggers create new resource * necessary for correctness should after triggers create new
* owners. * resource owners.
*/ */
case TBLOCK_SUBCOMMIT: case TBLOCK_SUBCOMMIT:
do do
@ -4659,6 +4661,7 @@ xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn,
XLogFlush(lsn); XLogFlush(lsn);
} }
/* /*
* Utility function to call xact_redo_commit_internal after breaking down xlrec * Utility function to call xact_redo_commit_internal after breaking down xlrec
*/ */


@ -344,10 +344,10 @@ typedef struct XLogCtlInsert
/* /*
* fullPageWrites is the master copy used by all backends to determine * fullPageWrites is the master copy used by all backends to determine
* whether to write full-page to WAL, instead of using process-local * whether to write full-page to WAL, instead of using process-local one.
* one. This is required because, when full_page_writes is changed * This is required because, when full_page_writes is changed by SIGHUP,
* by SIGHUP, we must WAL-log it before it actually affects * we must WAL-log it before it actually affects WAL-logging by backends.
* WAL-logging by backends. Checkpointer sets at startup or after SIGHUP. * Checkpointer sets at startup or after SIGHUP.
*/ */
bool fullPageWrites; bool fullPageWrites;
@ -455,8 +455,11 @@ typedef struct XLogCtlData
XLogRecPtr recoveryLastRecPtr; XLogRecPtr recoveryLastRecPtr;
/* timestamp of last COMMIT/ABORT record replayed (or being replayed) */ /* timestamp of last COMMIT/ABORT record replayed (or being replayed) */
TimestampTz recoveryLastXTime; TimestampTz recoveryLastXTime;
/* timestamp of when we started replaying the current chunk of WAL data,
* only relevant for replication or archive recovery */ /*
* timestamp of when we started replaying the current chunk of WAL data,
* only relevant for replication or archive recovery
*/
TimestampTz currentChunkStartTime; TimestampTz currentChunkStartTime;
/* end of the last record restored from the archive */ /* end of the last record restored from the archive */
XLogRecPtr restoreLastRecPtr; XLogRecPtr restoreLastRecPtr;
@ -750,8 +753,8 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* insert lock, but it seems better to avoid doing CRC calculations while * insert lock, but it seems better to avoid doing CRC calculations while
* holding the lock. * holding the lock.
* *
* We add entries for backup blocks to the chain, so that they don't * We add entries for backup blocks to the chain, so that they don't need
* need any special treatment in the critical section where the chunks are * any special treatment in the critical section where the chunks are
* copied into the WAL buffers. Those entries have to be unlinked from the * copied into the WAL buffers. Those entries have to be unlinked from the
* chain if we have to loop back here. * chain if we have to loop back here.
*/ */
@ -896,10 +899,10 @@ begin:;
/* /*
* Calculate CRC of the data, including all the backup blocks * Calculate CRC of the data, including all the backup blocks
* *
* Note that the record header isn't added into the CRC initially since * Note that the record header isn't added into the CRC initially since we
* we don't know the prev-link yet. Thus, the CRC will represent the CRC * don't know the prev-link yet. Thus, the CRC will represent the CRC of
* of the whole record in the order: rdata, then backup blocks, then * the whole record in the order: rdata, then backup blocks, then record
* record header. * header.
*/ */
INIT_CRC32(rdata_crc); INIT_CRC32(rdata_crc);
for (rdt = rdata; rdt != NULL; rdt = rdt->next) for (rdt = rdata; rdt != NULL; rdt = rdt->next)
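
The ordering this comment explains can be shown with a toy version: accumulate the CRC over the chain of data chunks (backup blocks are simply further chunks on the same chain), and fold in the record header last, because fields like the prev-link are not known until insert time. The sketch below is self-contained and uses a plain bitwise CRC-32; the real code uses the INIT_CRC32/COMP_CRC32 macros and a table-driven implementation, so the struct and function names here are illustrative only.

    #include <stdint.h>
    #include <stddef.h>

    /* chunk in a chain of record data, loosely modelled on XLogRecData */
    typedef struct DataChunk
    {
        const void         *data;
        size_t              len;
        struct DataChunk   *next;
    } DataChunk;

    /* plain bitwise CRC-32 update over a byte range */
    static uint32_t
    crc32_update(uint32_t crc, const void *data, size_t len)
    {
        const unsigned char *p = data;
        size_t      i;
        int         bit;

        for (i = 0; i < len; i++)
        {
            crc ^= p[i];
            for (bit = 0; bit < 8; bit++)
                crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
        }
        return crc;
    }

    /* Compute the record CRC in the order the comment describes: every data
     * chunk first, and only afterwards the record header. */
    static uint32_t
    record_crc(const DataChunk *chain, const void *hdr, size_t hdrlen)
    {
        uint32_t    crc = 0xFFFFFFFFu;
        const DataChunk *c;

        for (c = chain; c != NULL; c = c->next)
            crc = crc32_update(crc, c->data, c->len);

        crc = crc32_update(crc, hdr, hdrlen);
        return ~crc;
    }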
@ -948,10 +951,10 @@ begin:;
} }
/* /*
* Also check to see if fullPageWrites or forcePageWrites was just turned on; * Also check to see if fullPageWrites or forcePageWrites was just turned
* if we weren't already doing full-page writes then go back and recompute. * on; if we weren't already doing full-page writes then go back and
* (If it was just turned off, we could recompute the record without full pages, * recompute. (If it was just turned off, we could recompute the record
* but we choose not to bother.) * without full pages, but we choose not to bother.)
*/ */
if ((Insert->fullPageWrites || Insert->forcePageWrites) && !doPageWrites) if ((Insert->fullPageWrites || Insert->forcePageWrites) && !doPageWrites)
{ {
@ -1583,7 +1586,7 @@ AdvanceXLInsertBuffer(bool new_segment)
* would miss the opportunity to compress a few records. * would miss the opportunity to compress a few records.
*/ */
if (!Insert->forcePageWrites) if (!Insert->forcePageWrites)
NewPage->xlp_info |= XLP_BKP_REMOVABLE; NewPage ->xlp_info |= XLP_BKP_REMOVABLE;
/* /*
* If first page of an XLOG segment file, make it a long header. * If first page of an XLOG segment file, make it a long header.
@ -1827,11 +1830,11 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
Write->lastSegSwitchTime = (pg_time_t) time(NULL); Write->lastSegSwitchTime = (pg_time_t) time(NULL);
/* /*
* Request a checkpoint if we've consumed too * Request a checkpoint if we've consumed too much xlog since
* much xlog since the last one. For speed, we first check * the last one. For speed, we first check using the local
* using the local copy of RedoRecPtr, which might be out of * copy of RedoRecPtr, which might be out of date; if it looks
* date; if it looks like a checkpoint is needed, forcibly * like a checkpoint is needed, forcibly update RedoRecPtr and
* update RedoRecPtr and recheck. * recheck.
*/ */
if (IsUnderPostmaster && if (IsUnderPostmaster &&
XLogCheckpointNeeded(openLogId, openLogSeg)) XLogCheckpointNeeded(openLogId, openLogSeg))
@ -1945,9 +1948,9 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
} }
/* /*
* Nudge the WALWriter: it has a full page of WAL to write, or we want * Nudge the WALWriter: it has a full page of WAL to write, or we want it
* it to come out of low-power mode so that this async commit will reach * to come out of low-power mode so that this async commit will reach disk
* disk within the expected amount of time. * within the expected amount of time.
*/ */
if (ProcGlobal->walwriterLatch) if (ProcGlobal->walwriterLatch)
SetLatch(ProcGlobal->walwriterLatch); SetLatch(ProcGlobal->walwriterLatch);
@ -2076,8 +2079,8 @@ XLogFlush(XLogRecPtr record)
WriteRqstPtr = record; WriteRqstPtr = record;
/* /*
* Now wait until we get the write lock, or someone else does the * Now wait until we get the write lock, or someone else does the flush
* flush for us. * for us.
*/ */
for (;;) for (;;)
{ {
@ -2273,7 +2276,8 @@ XLogBackgroundFlush(void)
/* /*
* If we wrote something then we have something to send to standbys also, * If we wrote something then we have something to send to standbys also,
 * otherwise the replication delay becomes around 7s with just async commit. * otherwise the replication delay becomes around 7s with just async
* commit.
*/ */
if (wrote_something) if (wrote_something)
WalSndWakeup(); WalSndWakeup();
@ -2776,8 +2780,8 @@ XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
} }
/* /*
* If the segment was fetched from archival storage, replace * If the segment was fetched from archival storage, replace the existing
* the existing xlog segment (if any) with the archival version. * xlog segment (if any) with the archival version.
*/ */
if (source == XLOG_FROM_ARCHIVE) if (source == XLOG_FROM_ARCHIVE)
{ {
@ -5576,13 +5580,13 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
} }
/* /*
* If we are establishing a new timeline, we have to copy data from * If we are establishing a new timeline, we have to copy data from the
* the last WAL segment of the old timeline to create a starting WAL * last WAL segment of the old timeline to create a starting WAL segment
* segment for the new timeline. * for the new timeline.
* *
* Notify the archiver that the last WAL segment of the old timeline * Notify the archiver that the last WAL segment of the old timeline is
* is ready to copy to archival storage. Otherwise, it is not archived * ready to copy to archival storage. Otherwise, it is not archived for a
* for a while. * while.
*/ */
if (endTLI != ThisTimeLineID) if (endTLI != ThisTimeLineID)
{ {
@ -5604,8 +5608,8 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
XLogArchiveCleanup(xlogpath); XLogArchiveCleanup(xlogpath);
/* /*
* Since there might be a partial WAL segment named RECOVERYXLOG, * Since there might be a partial WAL segment named RECOVERYXLOG, get rid
* get rid of it. * of it.
*/ */
snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG"); snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG");
unlink(recoveryPath); /* ignore any error */ unlink(recoveryPath); /* ignore any error */
@ -6323,11 +6327,11 @@ StartupXLOG(void)
/* /*
* Set backupStartPoint if we're starting recovery from a base backup. * Set backupStartPoint if we're starting recovery from a base backup.
* *
* Set backupEndPoint and use minRecoveryPoint as the backup end location * Set backupEndPoint and use minRecoveryPoint as the backup end
* if we're starting recovery from a base backup which was taken from * location if we're starting recovery from a base backup which was
* the standby. In this case, the database system status in pg_control must * taken from the standby. In this case, the database system status in
* indicate DB_IN_ARCHIVE_RECOVERY. If not, which means that backup * pg_control must indicate DB_IN_ARCHIVE_RECOVERY. If not, which
* is corrupted, so we cancel recovery. * means that backup is corrupted, so we cancel recovery.
*/ */
if (haveBackupLabel) if (haveBackupLabel)
{ {
@ -6383,15 +6387,15 @@ StartupXLOG(void)
/* /*
* We're in recovery, so unlogged relations may be trashed and must be * We're in recovery, so unlogged relations may be trashed and must be
* reset. This should be done BEFORE allowing Hot Standby connections, * reset. This should be done BEFORE allowing Hot Standby
* so that read-only backends don't try to read whatever garbage is * connections, so that read-only backends don't try to read whatever
* left over from before. * garbage is left over from before.
*/ */
ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP); ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP);
/* /*
* Likewise, delete any saved transaction snapshot files that got * Likewise, delete any saved transaction snapshot files that got left
* left behind by crashed backends. * behind by crashed backends.
*/ */
DeleteAllExportedSnapshotFiles(); DeleteAllExportedSnapshotFiles();
@ -6489,10 +6493,11 @@ StartupXLOG(void)
/* /*
* Let postmaster know we've started redo now, so that it can launch * Let postmaster know we've started redo now, so that it can launch
* checkpointer to perform restartpoints. We don't bother during crash * checkpointer to perform restartpoints. We don't bother during
* recovery as restartpoints can only be performed during archive * crash recovery as restartpoints can only be performed during
* recovery. And we'd like to keep crash recovery simple, to avoid * archive recovery. And we'd like to keep crash recovery simple, to
* introducing bugs that could affect you when recovering after crash. * avoid introducing bugs that could affect you when recovering after
* crash.
* *
* After this point, we can no longer assume that we're the only * After this point, we can no longer assume that we're the only
* process in addition to postmaster! Also, fsync requests are * process in addition to postmaster! Also, fsync requests are
@ -6649,8 +6654,8 @@ StartupXLOG(void)
{ {
/* /*
* We have reached the end of base backup, the point where * We have reached the end of base backup, the point where
* the minimum recovery point in pg_control indicates. * the minimum recovery point in pg_control indicates. The
* The data on disk is now consistent. Reset backupStartPoint * data on disk is now consistent. Reset backupStartPoint
* and backupEndPoint. * and backupEndPoint.
*/ */
elog(DEBUG1, "end of backup reached"); elog(DEBUG1, "end of backup reached");
@ -6863,9 +6868,9 @@ StartupXLOG(void)
oldestActiveXID = PrescanPreparedTransactions(NULL, NULL); oldestActiveXID = PrescanPreparedTransactions(NULL, NULL);
/* /*
* Update full_page_writes in shared memory and write an * Update full_page_writes in shared memory and write an XLOG_FPW_CHANGE
* XLOG_FPW_CHANGE record before resource manager writes cleanup * record before resource manager writes cleanup WAL records or checkpoint
* WAL records or checkpoint record is written. * record is written.
*/ */
Insert->fullPageWrites = lastFullPageWrites; Insert->fullPageWrites = lastFullPageWrites;
LocalSetXLogInsertAllowed(); LocalSetXLogInsertAllowed();
@ -6954,8 +6959,8 @@ StartupXLOG(void)
LWLockRelease(ProcArrayLock); LWLockRelease(ProcArrayLock);
/* /*
* Start up the commit log and subtrans, if not already done for * Start up the commit log and subtrans, if not already done for hot
* hot standby. * standby.
*/ */
if (standbyState == STANDBY_DISABLED) if (standbyState == STANDBY_DISABLED)
{ {
@ -7705,9 +7710,9 @@ CreateCheckPoint(int flags)
checkPoint.time = (pg_time_t) time(NULL); checkPoint.time = (pg_time_t) time(NULL);
/* /*
* For Hot Standby, derive the oldestActiveXid before we fix the redo pointer. * For Hot Standby, derive the oldestActiveXid before we fix the redo
* This allows us to begin accumulating changes to assemble our starting * pointer. This allows us to begin accumulating changes to assemble our
* snapshot of locks and transactions. * starting snapshot of locks and transactions.
*/ */
if (!shutdown && XLogStandbyInfoActive()) if (!shutdown && XLogStandbyInfoActive())
checkPoint.oldestActiveXid = GetOldestActiveTransactionId(); checkPoint.oldestActiveXid = GetOldestActiveTransactionId();
@ -8082,10 +8087,11 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
} }
/* /*
* Also refrain from creating a restartpoint if we have seen any references * Also refrain from creating a restartpoint if we have seen any
* to non-existent pages. Restarting recovery from the restartpoint would * references to non-existent pages. Restarting recovery from the
* not see the references, so we would lose the cross-check that the pages * restartpoint would not see the references, so we would lose the
* belonged to a relation that was dropped later. * cross-check that the pages belonged to a relation that was dropped
* later.
*/ */
if (XLogHaveInvalidPages()) if (XLogHaveInvalidPages())
{ {
@ -8098,8 +8104,8 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
} }
/* /*
* Copy the checkpoint record to shared memory, so that checkpointer * Copy the checkpoint record to shared memory, so that checkpointer can
* can work out the next time it wants to perform a restartpoint. * work out the next time it wants to perform a restartpoint.
*/ */
SpinLockAcquire(&xlogctl->info_lck); SpinLockAcquire(&xlogctl->info_lck);
XLogCtl->lastCheckPointRecPtr = ReadRecPtr; XLogCtl->lastCheckPointRecPtr = ReadRecPtr;
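The copy into shared memory above happens under XLogCtl's info_lck spinlock, and the critical section is deliberately tiny: just a couple of assignments. A standalone sketch of that publish-under-a-light-lock shape, using a pthread mutex and made-up field names in place of the real spinlock and XLogCtl fields:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the shared state; the field names are hypothetical. */
static struct
{
    pthread_mutex_t lck;                /* plays the role of the spinlock */
    uint64_t    last_checkpoint_ptr;
    uint64_t    last_checkpoint_end;
} shared = {PTHREAD_MUTEX_INITIALIZER, 0, 0};

static void
publish_checkpoint(uint64_t rec_ptr, uint64_t end_ptr)
{
    /* Keep the critical section to the bare copy, nothing else. */
    pthread_mutex_lock(&shared.lck);
    shared.last_checkpoint_ptr = rec_ptr;
    shared.last_checkpoint_end = end_ptr;
    pthread_mutex_unlock(&shared.lck);
}

int
main(void)
{
    publish_checkpoint(12345, 12789);
    printf("published checkpoint record at %llu (end %llu)\n",
           (unsigned long long) shared.last_checkpoint_ptr,
           (unsigned long long) shared.last_checkpoint_end);
    return 0;
}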
@ -8493,8 +8499,8 @@ UpdateFullPageWrites(void)
* Do nothing if full_page_writes has not been changed. * Do nothing if full_page_writes has not been changed.
* *
* It's safe to check the shared full_page_writes without the lock, * It's safe to check the shared full_page_writes without the lock,
* because we assume that there is no concurrently running process * because we assume that there is no concurrently running process which
* which can update it. * can update it.
*/ */
if (fullPageWrites == Insert->fullPageWrites) if (fullPageWrites == Insert->fullPageWrites)
return; return;
@ -8505,8 +8511,8 @@ UpdateFullPageWrites(void)
* It's always safe to take full page images, even when not strictly * It's always safe to take full page images, even when not strictly
 * required, but not the other way round. So if we're setting full_page_writes * required, but not the other way round. So if we're setting full_page_writes
* to true, first set it true and then write the WAL record. If we're * to true, first set it true and then write the WAL record. If we're
* setting it to false, first write the WAL record and then set the * setting it to false, first write the WAL record and then set the global
* global flag. * flag.
*/ */
if (fullPageWrites) if (fullPageWrites)
{ {
@ -8516,8 +8522,8 @@ UpdateFullPageWrites(void)
} }
/* /*
* Write an XLOG_FPW_CHANGE record. This allows us to keep * Write an XLOG_FPW_CHANGE record. This allows us to keep track of
* track of full_page_writes during archive recovery, if required. * full_page_writes during archive recovery, if required.
*/ */
if (XLogStandbyInfoActive() && !RecoveryInProgress()) if (XLogStandbyInfoActive() && !RecoveryInProgress())
{ {
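The two hunks above describe an ordering rule built on the fact that taking full-page images is always safe even when not required: when full_page_writes is being turned on, the shared flag is raised before the XLOG_FPW_CHANGE record goes out, and when it is being turned off, the record is written first and the flag cleared afterwards. A small standalone sketch of that asymmetry, with a bool and a printf standing in for the shared flag and the WAL record:

#include <stdbool.h>
#include <stdio.h>

static bool shared_full_page_writes;    /* stand-in for the shared flag */

static void
log_fpw_change(bool value)              /* stand-in for writing XLOG_FPW_CHANGE */
{
    printf("WAL: full_page_writes -> %s\n", value ? "on" : "off");
}

static void
update_full_page_writes(bool new_value)
{
    if (new_value == shared_full_page_writes)
        return;                         /* nothing to do */

    if (new_value)
    {
        /* Enabling: taking extra page images early is harmless, so flip
         * the flag first, then write the record. */
        shared_full_page_writes = true;
        log_fpw_change(true);
    }
    else
    {
        /* Disabling: write the record first and clear the flag afterwards,
         * so during the transition we err on the side of still taking
         * page images. */
        log_fpw_change(false);
        shared_full_page_writes = false;
    }
}

int
main(void)
{
    update_full_page_writes(true);
    update_full_page_writes(false);
    return 0;
}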
@ -8797,9 +8803,9 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
memcpy(&fpw, XLogRecGetData(record), sizeof(bool)); memcpy(&fpw, XLogRecGetData(record), sizeof(bool));
/* /*
* Update the LSN of the last replayed XLOG_FPW_CHANGE record * Update the LSN of the last replayed XLOG_FPW_CHANGE record so that
* so that do_pg_start_backup() and do_pg_stop_backup() can check * do_pg_start_backup() and do_pg_stop_backup() can check whether
* whether full_page_writes has been disabled during online backup. * full_page_writes has been disabled during online backup.
*/ */
if (!fpw) if (!fpw)
{ {
@ -9115,8 +9121,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
errhint("WAL control functions cannot be executed during recovery."))); errhint("WAL control functions cannot be executed during recovery.")));
/* /*
* During recovery, we don't need to check WAL level. Because, if WAL level * During recovery, we don't need to check WAL level. Because, if WAL
* is not sufficient, it's impossible to get here during recovery. * level is not sufficient, it's impossible to get here during recovery.
*/ */
if (!backup_started_in_recovery && !XLogIsNeeded()) if (!backup_started_in_recovery && !XLogIsNeeded())
ereport(ERROR, ereport(ERROR,
@ -9207,12 +9213,13 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
* will have different checkpoint positions and hence different * will have different checkpoint positions and hence different
* history file names, even if nothing happened in between. * history file names, even if nothing happened in between.
* *
* During recovery, establish a restartpoint if possible. We use the last * During recovery, establish a restartpoint if possible. We use
* restartpoint as the backup starting checkpoint. This means that two * the last restartpoint as the backup starting checkpoint. This
 * successive backup runs can have the same checkpoint positions. * means that two successive backup runs can have the same checkpoint
* positions.
* *
* Since the fact that we are executing do_pg_start_backup() during * Since the fact that we are executing do_pg_start_backup()
* recovery means that checkpointer is running, we can use * during recovery means that checkpointer is running, we can use
* RequestCheckpoint() to establish a restartpoint. * RequestCheckpoint() to establish a restartpoint.
* *
* We use CHECKPOINT_IMMEDIATE only if requested by user (via * We use CHECKPOINT_IMMEDIATE only if requested by user (via
@ -9240,9 +9247,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
XLogRecPtr recptr; XLogRecPtr recptr;
/* /*
* Check to see if all WAL replayed during online backup (i.e., * Check to see if all WAL replayed during online backup
* since last restartpoint used as backup starting checkpoint) * (i.e., since last restartpoint used as backup starting
* contain full-page writes. * checkpoint) contain full-page writes.
*/ */
SpinLockAcquire(&xlogctl->info_lck); SpinLockAcquire(&xlogctl->info_lck);
recptr = xlogctl->lastFpwDisableRecPtr; recptr = xlogctl->lastFpwDisableRecPtr;
@ -9260,10 +9267,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
/* /*
* During recovery, since we don't use the end-of-backup WAL * During recovery, since we don't use the end-of-backup WAL
* record and don't write the backup history file, the starting WAL * record and don't write the backup history file, the
* location doesn't need to be unique. This means that two base * starting WAL location doesn't need to be unique. This means
* backups started at the same time might use the same checkpoint * that two base backups started at the same time might use
* as starting locations. * the same checkpoint as starting locations.
*/ */
gotUniqueStartpoint = true; gotUniqueStartpoint = true;
} }
@ -9443,8 +9450,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
errhint("WAL control functions cannot be executed during recovery."))); errhint("WAL control functions cannot be executed during recovery.")));
/* /*
* During recovery, we don't need to check WAL level. Because, if WAL level * During recovery, we don't need to check WAL level. Because, if WAL
* is not sufficient, it's impossible to get here during recovery. * level is not sufficient, it's impossible to get here during recovery.
*/ */
if (!backup_started_in_recovery && !XLogIsNeeded()) if (!backup_started_in_recovery && !XLogIsNeeded())
ereport(ERROR, ereport(ERROR,
@ -9537,9 +9544,9 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
remaining = strchr(labelfile, '\n') + 1; /* %n is not portable enough */ remaining = strchr(labelfile, '\n') + 1; /* %n is not portable enough */
/* /*
* Parse the BACKUP FROM line. If we are taking an online backup from * Parse the BACKUP FROM line. If we are taking an online backup from the
* the standby, we confirm that the standby has not been promoted * standby, we confirm that the standby has not been promoted during the
* during the backup. * backup.
*/ */
ptr = strstr(remaining, "BACKUP FROM:"); ptr = strstr(remaining, "BACKUP FROM:");
if (!ptr || sscanf(ptr, "BACKUP FROM: %19s\n", backupfrom) != 1) if (!ptr || sscanf(ptr, "BACKUP FROM: %19s\n", backupfrom) != 1)
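The hunk above finds the BACKUP FROM line with strstr() and extracts its value with a bounded sscanf(). A self-contained example of the same parsing approach on an illustrative label fragment (the literal file contents below are made up for the example, not copied from a real backup_label):

#include <stdio.h>
#include <string.h>

int
main(void)
{
    const char *label =
        "START WAL LOCATION: 0/2000028 (file 000000010000000000000002)\n"
        "BACKUP METHOD: streamed\n"
        "BACKUP FROM: standby\n";
    char        backupfrom[20];
    const char *ptr = strstr(label, "BACKUP FROM:");

    if (!ptr || sscanf(ptr, "BACKUP FROM: %19s\n", backupfrom) != 1)
    {
        fprintf(stderr, "invalid data in label\n");
        return 1;
    }
    printf("backup was taken from the %s\n", backupfrom);
    return 0;
}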
@ -9555,30 +9562,30 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
"Try taking another online backup."))); "Try taking another online backup.")));
/* /*
* During recovery, we don't write an end-of-backup record. We assume * During recovery, we don't write an end-of-backup record. We assume that
* that pg_control was backed up last and its minimum recovery * pg_control was backed up last and its minimum recovery point can be
* point can be available as the backup end location. Since we don't * available as the backup end location. Since we don't have an
* have an end-of-backup record, we use the pg_control value to check * end-of-backup record, we use the pg_control value to check whether
* whether we've reached the end of backup when starting recovery from * we've reached the end of backup when starting recovery from this
* this backup. We have no way of checking if pg_control wasn't backed * backup. We have no way of checking if pg_control wasn't backed up last
* up last however. * however.
* *
* We don't force a switch to new WAL file and wait for all the required * We don't force a switch to new WAL file and wait for all the required
* files to be archived. This is okay if we use the backup to start * files to be archived. This is okay if we use the backup to start the
* the standby. But, if it's for an archive recovery, to ensure all the * standby. But, if it's for an archive recovery, to ensure all the
* required files are available, a user should wait for them to be archived, * required files are available, a user should wait for them to be
* or include them into the backup. * archived, or include them into the backup.
* *
* We return the current minimum recovery point as the backup end * We return the current minimum recovery point as the backup end
 * location. Note that it would be bigger than the exact backup end * location. Note that it would be bigger than the exact backup end
* location if the minimum recovery point is updated since the backup * location if the minimum recovery point is updated since the backup of
* of pg_control. This is harmless for current uses. * pg_control. This is harmless for current uses.
* *
* XXX currently a backup history file is for informational and debug * XXX currently a backup history file is for informational and debug
* purposes only. It's not essential for an online backup. Furthermore, * purposes only. It's not essential for an online backup. Furthermore,
* even if it's created, it will not be archived during recovery because * even if it's created, it will not be archived during recovery because
* an archiver is not invoked. So it doesn't seem worthwhile to write * an archiver is not invoked. So it doesn't seem worthwhile to write a
* a backup history file during recovery. * backup history file during recovery.
*/ */
if (backup_started_in_recovery) if (backup_started_in_recovery)
{ {
@ -9905,10 +9912,11 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
ereport(FATAL, ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE))); errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE)));
/* /*
* BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't * BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't restore
* restore from an older backup anyway, but since the information on it * from an older backup anyway, but since the information on it is not
* is not strictly required, don't error out if it's missing for some reason. * strictly required, don't error out if it's missing for some reason.
*/ */
if (fscanf(lfp, "BACKUP METHOD: %19s\n", backuptype) == 1) if (fscanf(lfp, "BACKUP METHOD: %19s\n", backuptype) == 1)
{ {
@ -10050,8 +10058,8 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg)) if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg))
{ {
/* /*
* Request a restartpoint if we've replayed too much * Request a restartpoint if we've replayed too much xlog since the
* xlog since the last one. * last one.
*/ */
if (StandbyMode && bgwriterLaunched) if (StandbyMode && bgwriterLaunched)
{ {

View File

@ -80,10 +80,10 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
/* /*
* Once recovery has reached a consistent state, the invalid-page table * Once recovery has reached a consistent state, the invalid-page table
* should be empty and remain so. If a reference to an invalid page is * should be empty and remain so. If a reference to an invalid page is
* found after consistency is reached, PANIC immediately. This might * found after consistency is reached, PANIC immediately. This might seem
* seem aggressive, but it's better than letting the invalid reference * aggressive, but it's better than letting the invalid reference linger
* linger in the hash table until the end of recovery and PANIC there, * in the hash table until the end of recovery and PANIC there, which
* which might come only much later if this is a standby server. * might come only much later if this is a standby server.
*/ */
if (reachedConsistency) if (reachedConsistency)
{ {

View File

@ -189,7 +189,7 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
AclItem aclitem; AclItem aclitem;
Acl *newer_acl; Acl *newer_acl;
aclitem. ai_grantee = lfirst_oid(j); aclitem.ai_grantee = lfirst_oid(j);
/* /*
* Grant options can only be granted to individual roles, not PUBLIC. * Grant options can only be granted to individual roles, not PUBLIC.
@ -202,7 +202,7 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
(errcode(ERRCODE_INVALID_GRANT_OPERATION), (errcode(ERRCODE_INVALID_GRANT_OPERATION),
errmsg("grant options can only be granted to roles"))); errmsg("grant options can only be granted to roles")));
aclitem. ai_grantor = grantorId; aclitem.ai_grantor = grantorId;
/* /*
* The asymmetry in the conditions here comes from the spec. In * The asymmetry in the conditions here comes from the spec. In

View File

@ -352,7 +352,8 @@ performMultipleDeletions(const ObjectAddresses *objects,
free_object_addresses(targetObjects); free_object_addresses(targetObjects);
/* /*
* We closed depRel earlier in deleteOneObject if doing a drop concurrently * We closed depRel earlier in deleteOneObject if doing a drop
* concurrently
*/ */
if ((flags & PERFORM_DELETION_CONCURRENTLY) != PERFORM_DELETION_CONCURRENTLY) if ((flags & PERFORM_DELETION_CONCURRENTLY) != PERFORM_DELETION_CONCURRENTLY)
heap_close(depRel, RowExclusiveLock); heap_close(depRel, RowExclusiveLock);
@ -517,9 +518,9 @@ findDependentObjects(const ObjectAddress *object,
* owner). If so, and if we aren't recursing from the owning object, we * owner). If so, and if we aren't recursing from the owning object, we
* have to transform this deletion request into a deletion request of the * have to transform this deletion request into a deletion request of the
* owning object. (We'll eventually recurse back to this object, but the * owning object. (We'll eventually recurse back to this object, but the
* owning object has to be visited first so it will be deleted after.) * owning object has to be visited first so it will be deleted after.) The
* The way to find out about this is to scan the pg_depend entries that * way to find out about this is to scan the pg_depend entries that show
* show what this object depends on. * what this object depends on.
*/ */
ScanKeyInit(&key[0], ScanKeyInit(&key[0],
Anum_pg_depend_classid, Anum_pg_depend_classid,
@ -593,8 +594,8 @@ findDependentObjects(const ObjectAddress *object,
* Exception 1b: if the owning object is the extension * Exception 1b: if the owning object is the extension
* currently being created/altered, it's okay to continue * currently being created/altered, it's okay to continue
* with the deletion. This allows dropping of an * with the deletion. This allows dropping of an
* extension's objects within the extension's scripts, * extension's objects within the extension's scripts, as
* as well as corner cases such as dropping a transient * well as corner cases such as dropping a transient
* object created within such a script. * object created within such a script.
*/ */
if (creating_extension && if (creating_extension &&
@ -618,8 +619,8 @@ findDependentObjects(const ObjectAddress *object,
* it's okay to continue with the deletion. This holds when * it's okay to continue with the deletion. This holds when
* recursing from a whole object that includes the nominal * recursing from a whole object that includes the nominal
* other end as a component, too. Since there can be more * other end as a component, too. Since there can be more
* than one "owning" object, we have to allow matches that * than one "owning" object, we have to allow matches that are
* are more than one level down in the stack. * more than one level down in the stack.
*/ */
if (stack_address_present_add_flags(&otherObject, 0, stack)) if (stack_address_present_add_flags(&otherObject, 0, stack))
break; break;
@ -1000,6 +1001,7 @@ deleteOneObject(const ObjectAddress *object, Relation depRel, int flags)
if (object_access_hook) if (object_access_hook)
{ {
ObjectAccessDrop drop_arg; ObjectAccessDrop drop_arg;
drop_arg.dropflags = flags; drop_arg.dropflags = flags;
InvokeObjectAccessHook(OAT_DROP, object->classId, object->objectId, InvokeObjectAccessHook(OAT_DROP, object->classId, object->objectId,
object->objectSubId, &drop_arg); object->objectSubId, &drop_arg);
@ -1049,8 +1051,8 @@ deleteOneObject(const ObjectAddress *object, Relation depRel, int flags)
object->objectSubId); object->objectSubId);
/* /*
* Close depRel if we are doing a drop concurrently because it * Close depRel if we are doing a drop concurrently because it commits the
* commits the transaction, so we don't want dangling references. * transaction, so we don't want dangling references.
*/ */
if ((flags & PERFORM_DELETION_CONCURRENTLY) == PERFORM_DELETION_CONCURRENTLY) if ((flags & PERFORM_DELETION_CONCURRENTLY) == PERFORM_DELETION_CONCURRENTLY)
heap_close(depRel, RowExclusiveLock); heap_close(depRel, RowExclusiveLock);

View File

@ -1324,8 +1324,8 @@ index_drop(Oid indexId, bool concurrent)
CheckTableNotInUse(userIndexRelation, "DROP INDEX"); CheckTableNotInUse(userIndexRelation, "DROP INDEX");
/* /*
* Drop Index concurrently is similar in many ways to creating an * Drop Index concurrently is similar in many ways to creating an index
* index concurrently, so some actions are similar to DefineIndex() * concurrently, so some actions are similar to DefineIndex()
*/ */
if (concurrent) if (concurrent)
{ {
@ -1373,15 +1373,15 @@ index_drop(Oid indexId, bool concurrent)
* will be marked not indisvalid, so that no one else tries to either * will be marked not indisvalid, so that no one else tries to either
* insert into it or use it for queries. * insert into it or use it for queries.
* *
* We must commit our current transaction so that the index update becomes * We must commit our current transaction so that the index update
* visible; then start another. Note that all the data structures we just * becomes visible; then start another. Note that all the data
* built are lost in the commit. The only data we keep past here are the * structures we just built are lost in the commit. The only data we
* relation IDs. * keep past here are the relation IDs.
* *
* Before committing, get a session-level lock on the table, to ensure * Before committing, get a session-level lock on the table, to ensure
* that neither it nor the index can be dropped before we finish. This * that neither it nor the index can be dropped before we finish. This
* cannot block, even if someone else is waiting for access, because we * cannot block, even if someone else is waiting for access, because
* already have the same lock within our transaction. * we already have the same lock within our transaction.
*/ */
LockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock); LockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
LockRelationIdForSession(&indexrelid, ShareUpdateExclusiveLock); LockRelationIdForSession(&indexrelid, ShareUpdateExclusiveLock);
@ -1391,23 +1391,23 @@ index_drop(Oid indexId, bool concurrent)
StartTransactionCommand(); StartTransactionCommand();
/* /*
* Now we must wait until no running transaction could have the table open * Now we must wait until no running transaction could have the table
* with the old list of indexes. To do this, inquire which xacts * open with the old list of indexes. To do this, inquire which xacts
* currently would conflict with AccessExclusiveLock on the table -- ie, * currently would conflict with AccessExclusiveLock on the table --
* which ones have a lock of any kind on the table. Then wait for each of * ie, which ones have a lock of any kind on the table. Then wait for
* these xacts to commit or abort. Note we do not need to worry about * each of these xacts to commit or abort. Note we do not need to
* xacts that open the table for writing after this point; they will see * worry about xacts that open the table for writing after this point;
* the index as invalid when they open the relation. * they will see the index as invalid when they open the relation.
* *
* Note: the reason we use actual lock acquisition here, rather than just * Note: the reason we use actual lock acquisition here, rather than
* checking the ProcArray and sleeping, is that deadlock is possible if * just checking the ProcArray and sleeping, is that deadlock is
* one of the transactions in question is blocked trying to acquire an * possible if one of the transactions in question is blocked trying
* exclusive lock on our table. The lock code will detect deadlock and * to acquire an exclusive lock on our table. The lock code will
* error out properly. * detect deadlock and error out properly.
* *
* Note: GetLockConflicts() never reports our own xid, hence we need not * Note: GetLockConflicts() never reports our own xid, hence we need
* check for that. Also, prepared xacts are not reported, which is fine * not check for that. Also, prepared xacts are not reported, which
* since they certainly aren't going to do anything more. * is fine since they certainly aren't going to do anything more.
*/ */
old_lockholders = GetLockConflicts(&heaplocktag, AccessExclusiveLock); old_lockholders = GetLockConflicts(&heaplocktag, AccessExclusiveLock);
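The block comment above is the heart of the concurrent-drop wait: collect every transaction that currently holds any lock on the table, then block on each of them through the regular lock manager, so the deadlock detector still applies. A toy standalone sketch of that wait loop; the zero-terminated array and wait_for_transaction_end() are stand-ins for what GetLockConflicts() returns and for the real lock wait:

#include <stdio.h>

#define INVALID_XID 0

/* Pretend snapshot of transactions that held a lock on the table when we
 * looked; in index_drop() this comes from GetLockConflicts(). */
static unsigned conflicting[] = {101, 205, INVALID_XID};

static void
wait_for_transaction_end(unsigned xid)
{
    /* In the real code this is a lock acquisition that blocks until the
     * transaction commits or aborts, which is what keeps deadlock
     * detection in play. */
    printf("waiting for transaction %u to finish\n", xid);
}

int
main(void)
{
    /* Anyone who might still see the old index list must finish first;
     * transactions that start later already see the index as invalid. */
    for (unsigned *xid = conflicting; *xid != INVALID_XID; xid++)
        wait_for_transaction_end(*xid);
    printf("no remaining lock holders; safe to proceed\n");
    return 0;
}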

View File

@ -247,11 +247,11 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
} }
/* /*
* DDL operations can change the results of a name lookup. Since all * DDL operations can change the results of a name lookup. Since all such
* such operations will generate invalidation messages, we keep track * operations will generate invalidation messages, we keep track of
* of whether any such messages show up while we're performing the * whether any such messages show up while we're performing the operation,
* operation, and retry until either (1) no more invalidation messages * and retry until either (1) no more invalidation messages show up or (2)
* show up or (2) the answer doesn't change. * the answer doesn't change.
* *
* But if lockmode = NoLock, then we assume that either the caller is OK * But if lockmode = NoLock, then we assume that either the caller is OK
* with the answer changing under them, or that they already hold some * with the answer changing under them, or that they already hold some
@ -259,8 +259,8 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
* checking for invalidation messages. Also, if the requested lock is * checking for invalidation messages. Also, if the requested lock is
 * already held, LockRelationOid will not AcceptInvalidationMessages, * already held, LockRelationOid will not AcceptInvalidationMessages,
* so we may fail to notice a change. We could protect against that case * so we may fail to notice a change. We could protect against that case
* by calling AcceptInvalidationMessages() before beginning this loop, * by calling AcceptInvalidationMessages() before beginning this loop, but
 * but that would add a significant amount of overhead, so for now we don't. * that would add a significant amount of overhead, so for now we don't.
*/ */
for (;;) for (;;)
{ {
@ -288,6 +288,7 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
if (relation->schemaname) if (relation->schemaname)
{ {
Oid namespaceId; Oid namespaceId;
namespaceId = LookupExplicitNamespace(relation->schemaname); namespaceId = LookupExplicitNamespace(relation->schemaname);
if (namespaceId != myTempNamespace) if (namespaceId != myTempNamespace)
ereport(ERROR, ereport(ERROR,
@ -315,12 +316,12 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
/* /*
* Invoke caller-supplied callback, if any. * Invoke caller-supplied callback, if any.
* *
* This callback is a good place to check permissions: we haven't taken * This callback is a good place to check permissions: we haven't
* the table lock yet (and it's really best to check permissions before * taken the table lock yet (and it's really best to check permissions
* locking anything!), but we've gotten far enough to know what OID we * before locking anything!), but we've gotten far enough to know what
* think we should lock. Of course, concurrent DDL might change things * OID we think we should lock. Of course, concurrent DDL might
* while we're waiting for the lock, but in that case the callback will * change things while we're waiting for the lock, but in that case
* be invoked again for the new OID. * the callback will be invoked again for the new OID.
*/ */
if (callback) if (callback)
callback(relation, relId, oldRelId, callback_arg); callback(relation, relId, oldRelId, callback_arg);
@ -328,21 +329,21 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
/* /*
* If no lock requested, we assume the caller knows what they're * If no lock requested, we assume the caller knows what they're
* doing. They should have already acquired a heavyweight lock on * doing. They should have already acquired a heavyweight lock on
* this relation earlier in the processing of this same statement, * this relation earlier in the processing of this same statement, so
* so it wouldn't be appropriate to AcceptInvalidationMessages() * it wouldn't be appropriate to AcceptInvalidationMessages() here, as
* here, as that might pull the rug out from under them. * that might pull the rug out from under them.
*/ */
if (lockmode == NoLock) if (lockmode == NoLock)
break; break;
/* /*
* If, upon retry, we get back the same OID we did last time, then * If, upon retry, we get back the same OID we did last time, then the
* the invalidation messages we processed did not change the final * invalidation messages we processed did not change the final answer.
* answer. So we're done. * So we're done.
* *
* If we got a different OID, we've locked the relation that used to * If we got a different OID, we've locked the relation that used to
* have this name rather than the one that does now. So release * have this name rather than the one that does now. So release the
* the lock. * lock.
*/ */
if (retry) if (retry)
{ {
@ -384,8 +385,8 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
break; break;
/* /*
* Something may have changed. Let's repeat the name lookup, to * Something may have changed. Let's repeat the name lookup, to make
* make sure this name still references the same relation it did * sure this name still references the same relation it did
* previously. * previously.
*/ */
retry = true; retry = true;
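The loop being rewrapped here is a lookup-lock-recheck cycle: resolve the name, take the lock, and if invalidation messages arrived in the meantime, redo the lookup until the answer stops changing (unlocking the previously locked relation whenever the answer did change). A compressed standalone sketch of that cycle with hypothetical helpers (lookup_oid(), lock_oid(), unlock_oid(), invalidation_counter) rather than the real namespace.c machinery:

#include <stdio.h>

/* Hypothetical stand-ins for catalog lookup, locking, and sinval traffic. */
static unsigned invalidation_counter;
static unsigned lookups;

static unsigned
lookup_oid(const char *name)
{
    (void) name;
    /* Pretend concurrent DDL made the name point somewhere else once. */
    return ++lookups == 1 ? 1001 : 2002;
}

static void
lock_oid(unsigned oid)
{
    printf("lock %u\n", oid);
    invalidation_counter++;     /* pretend invalidations arrived while waiting */
}

static void
unlock_oid(unsigned oid)
{
    printf("unlock %u\n", oid);
}

static unsigned
lookup_and_lock(const char *name)
{
    unsigned    old_oid = 0;
    int         retry = 0;

    for (;;)
    {
        unsigned    before = invalidation_counter;
        unsigned    oid = lookup_oid(name);

        if (retry)
        {
            if (oid == old_oid)
                return oid;     /* same answer after a retry: done */
            unlock_oid(old_oid);        /* we had locked the wrong relation */
        }

        lock_oid(oid);          /* may block; invalidations can arrive here */

        if (invalidation_counter == before)
            return oid;         /* nothing changed while we waited */

        /* Something may have changed: repeat the lookup and compare. */
        retry = 1;
        old_oid = oid;
    }
}

int
main(void)
{
    printf("resolved and locked oid %u\n", lookup_and_lock("some_table"));
    return 0;
}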
@ -550,8 +551,8 @@ RangeVarGetAndCheckCreationNamespace(RangeVar *relation,
relid = InvalidOid; relid = InvalidOid;
/* /*
* In bootstrap processing mode, we don't bother with permissions * In bootstrap processing mode, we don't bother with permissions or
* or locking. Permissions might not be working yet, and locking is * locking. Permissions might not be working yet, and locking is
* unnecessary. * unnecessary.
*/ */
if (IsBootstrapProcessingMode()) if (IsBootstrapProcessingMode())

View File

@ -286,9 +286,9 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
for (;;) for (;;)
{ {
/* /*
* Remember this value, so that, after looking up the object name * Remember this value, so that, after looking up the object name and
* and locking it, we can check whether any invalidation messages * locking it, we can check whether any invalidation messages have
* have been processed that might require a do-over. * been processed that might require a do-over.
*/ */
inval_count = SharedInvalidMessageCounter; inval_count = SharedInvalidMessageCounter;
@ -475,8 +475,8 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
* At this point, we've resolved the name to an OID and locked the * At this point, we've resolved the name to an OID and locked the
* corresponding database object. However, it's possible that by the * corresponding database object. However, it's possible that by the
* time we acquire the lock on the object, concurrent DDL has modified * time we acquire the lock on the object, concurrent DDL has modified
* the database in such a way that the name we originally looked up * the database in such a way that the name we originally looked up no
* no longer resolves to that OID. * longer resolves to that OID.
* *
* We can be certain that this isn't an issue if (a) no shared * We can be certain that this isn't an issue if (a) no shared
* invalidation messages have been processed or (b) we've locked a * invalidation messages have been processed or (b) we've locked a
@ -488,8 +488,8 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
* the relation, which is enough to freeze out any concurrent DDL. * the relation, which is enough to freeze out any concurrent DDL.
* *
* In all other cases, however, it's possible that the name we looked * In all other cases, however, it's possible that the name we looked
* up no longer refers to the object we locked, so we retry the * up no longer refers to the object we locked, so we retry the lookup
* lookup and see whether we get the same answer. * and see whether we get the same answer.
*/ */
if (inval_count == SharedInvalidMessageCounter || relation != NULL) if (inval_count == SharedInvalidMessageCounter || relation != NULL)
break; break;
@ -721,8 +721,8 @@ get_object_address_relobject(ObjectType objtype, List *objname,
address.objectSubId = 0; address.objectSubId = 0;
/* /*
* Caller is expecting to get back the relation, even though we * Caller is expecting to get back the relation, even though we didn't
* didn't end up using it to find the rule. * end up using it to find the rule.
*/ */
if (OidIsValid(address.objectId)) if (OidIsValid(address.objectId))
relation = heap_open(reloid, AccessShareLock); relation = heap_open(reloid, AccessShareLock);
@ -837,6 +837,7 @@ get_object_address_type(ObjectType objtype,
ObjectAddress address; ObjectAddress address;
TypeName *typename; TypeName *typename;
Type tup; Type tup;
typename = makeTypeNameFromNameList(objname); typename = makeTypeNameFromNameList(objname);
address.classId = TypeRelationId; address.classId = TypeRelationId;

View File

@ -500,8 +500,8 @@ smgr_redo(XLogRecPtr lsn, XLogRecord *record)
/* /*
* Forcibly create relation if it doesn't exist (which suggests that * Forcibly create relation if it doesn't exist (which suggests that
* it was dropped somewhere later in the WAL sequence). As in * it was dropped somewhere later in the WAL sequence). As in
* XLogReadBuffer, we prefer to recreate the rel and replay the log * XLogReadBuffer, we prefer to recreate the rel and replay the log as
* as best we can until the drop is seen. * best we can until the drop is seen.
*/ */
smgrcreate(reln, MAIN_FORKNUM, true); smgrcreate(reln, MAIN_FORKNUM, true);

View File

@ -205,8 +205,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
} }
/* /*
* Check that it's a plain table or foreign table; we used to do this * Check that it's a plain table or foreign table; we used to do this in
* in get_rel_oids() but seems safer to check after we've locked the * get_rel_oids() but seems safer to check after we've locked the
* relation. * relation.
*/ */
if (onerel->rd_rel->relkind == RELKIND_RELATION) if (onerel->rd_rel->relkind == RELKIND_RELATION)
@ -464,8 +464,8 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
/* /*
* Determine how many rows we need to sample, using the worst case from * Determine how many rows we need to sample, using the worst case from
* all analyzable columns. We use a lower bound of 100 rows to avoid * all analyzable columns. We use a lower bound of 100 rows to avoid
* possible overflow in Vitter's algorithm. (Note: that will also be * possible overflow in Vitter's algorithm. (Note: that will also be the
* the target in the corner case where there are no analyzable columns.) * target in the corner case where there are no analyzable columns.)
*/ */
targrows = 100; targrows = 100;
for (i = 0; i < attr_cnt; i++) for (i = 0; i < attr_cnt; i++)
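targrows here is the size of the row sample ANALYZE will gather, with 100 as a floor to keep the sampling math safe. As a rough illustration of maintaining a fixed-size sample while scanning an input of unknown length, here is the simpler reservoir-sampling variant (Algorithm R) as standalone C; the real code uses Vitter's two-stage algorithm, so this only shows the shape of the idea:

#include <stdio.h>
#include <stdlib.h>

#define TARGROWS 100                    /* the lower bound mentioned above */

int
main(void)
{
    int         reservoir[TARGROWS];
    int         seen = 0;

    srand(42);
    for (int row = 0; row < 100000; row++)
    {
        if (seen < TARGROWS)
            reservoir[seen] = row;      /* fill the sample first */
        else
        {
            int         k = rand() % (seen + 1);

            if (k < TARGROWS)
                reservoir[k] = row;     /* replace one old entry at random */
        }
        seen++;
    }
    printf("kept a %d-row sample out of %d rows; reservoir[0] = %d\n",
           TARGROWS, seen, reservoir[0]);
    return 0;
}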

View File

@ -594,10 +594,10 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace)
OldHeapDesc = RelationGetDescr(OldHeap); OldHeapDesc = RelationGetDescr(OldHeap);
/* /*
* Note that the NewHeap will not * Note that the NewHeap will not receive any of the defaults or
* receive any of the defaults or constraints associated with the OldHeap; * constraints associated with the OldHeap; we don't need 'em, and there's
* we don't need 'em, and there's no reason to spend cycles inserting them * no reason to spend cycles inserting them into the catalogs only to
* into the catalogs only to delete them. * delete them.
*/ */
/* /*

View File

@ -1861,6 +1861,7 @@ CopyFrom(CopyState cstate)
uint64 processed = 0; uint64 processed = 0;
bool useHeapMultiInsert; bool useHeapMultiInsert;
int nBufferedTuples = 0; int nBufferedTuples = 0;
#define MAX_BUFFERED_TUPLES 1000 #define MAX_BUFFERED_TUPLES 1000
HeapTuple *bufferedTuples = NULL; /* initialize to silence warning */ HeapTuple *bufferedTuples = NULL; /* initialize to silence warning */
Size bufferedTuplesSize = 0; Size bufferedTuplesSize = 0;
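MAX_BUFFERED_TUPLES and the bufferedTuples array introduced in this hunk feed a buffer-then-flush scheme: COPY collects incoming rows and hands them to heap_multi_insert() a batch at a time (the later hunks deal with the memory context and index/trigger work for each batch). A small standalone sketch of the batching shape only, with a printf standing in for the multi-insert:

#include <stdio.h>

#define MAX_BUFFERED_ROWS 1000          /* plays the role of MAX_BUFFERED_TUPLES */

static int  buffered[MAX_BUFFERED_ROWS];
static int  nbuffered;

static void
flush_batch(void)
{
    if (nbuffered == 0)
        return;
    /* In copy.c this is where heap_multi_insert() and the index/trigger
     * work for the whole batch would happen. */
    printf("flushing %d buffered rows\n", nbuffered);
    nbuffered = 0;
}

static void
add_row(int row)
{
    buffered[nbuffered++] = row;
    if (nbuffered >= MAX_BUFFERED_ROWS)
        flush_batch();                  /* buffer full: insert the whole batch */
}

int
main(void)
{
    for (int row = 0; row < 2500; row++)
        add_row(row);
    flush_batch();                      /* don't forget the final partial batch */
    return 0;
}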
@ -2162,8 +2163,8 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
int i; int i;
/* /*
* heap_multi_insert leaks memory, so switch to short-lived memory * heap_multi_insert leaks memory, so switch to short-lived memory context
* context before calling it. * before calling it.
*/ */
oldcontext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); oldcontext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
heap_multi_insert(cstate->rel, heap_multi_insert(cstate->rel,
@ -2175,8 +2176,8 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
MemoryContextSwitchTo(oldcontext); MemoryContextSwitchTo(oldcontext);
/* /*
* If there are any indexes, update them for all the inserted tuples, * If there are any indexes, update them for all the inserted tuples, and
* and run AFTER ROW INSERT triggers. * run AFTER ROW INSERT triggers.
*/ */
if (resultRelInfo->ri_NumIndices > 0) if (resultRelInfo->ri_NumIndices > 0)
{ {
@ -2194,6 +2195,7 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
list_free(recheckIndexes); list_free(recheckIndexes);
} }
} }
/* /*
* There's no indexes, but see if we need to run AFTER ROW INSERT triggers * There's no indexes, but see if we need to run AFTER ROW INSERT triggers
* anyway. * anyway.

View File

@ -115,10 +115,10 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
/* /*
* Use a snapshot with an updated command ID to ensure this query sees * Use a snapshot with an updated command ID to ensure this query sees
* results of any previously executed queries. (This could only matter * results of any previously executed queries. (This could only matter if
* if the planner executed an allegedly-stable function that changed * the planner executed an allegedly-stable function that changed the
* the database contents, but let's do it anyway to be parallel to the * database contents, but let's do it anyway to be parallel to the EXPLAIN
* EXPLAIN code path.) * code path.)
*/ */
PushCopiedSnapshot(GetActiveSnapshot()); PushCopiedSnapshot(GetActiveSnapshot());
UpdateActiveSnapshotCommandId(); UpdateActiveSnapshotCommandId();
@ -237,8 +237,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
create->if_not_exists = false; create->if_not_exists = false;
/* /*
* Build column definitions using "pre-cooked" type and collation info. * Build column definitions using "pre-cooked" type and collation info. If
* If a column name list was specified in CREATE TABLE AS, override the * a column name list was specified in CREATE TABLE AS, override the
* column names derived from the query. (Too few column names are OK, too * column names derived from the query. (Too few column names are OK, too
* many are not.) * many are not.)
*/ */
@ -280,9 +280,9 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
/* /*
* It's possible that the column is of a collatable type but the * It's possible that the column is of a collatable type but the
* collation could not be resolved, so double-check. (We must * collation could not be resolved, so double-check. (We must check
* check this here because DefineRelation would adopt the type's * this here because DefineRelation would adopt the type's default
* default collation rather than complaining.) * collation rather than complaining.)
*/ */
if (!OidIsValid(col->collOid) && if (!OidIsValid(col->collOid) &&
type_is_collatable(coltype->typeOid)) type_is_collatable(coltype->typeOid))

View File

@ -785,6 +785,7 @@ dropdb(const char *dbname, bool missing_ok)
if (object_access_hook) if (object_access_hook)
{ {
ObjectAccessDrop drop_arg; ObjectAccessDrop drop_arg;
memset(&drop_arg, 0, sizeof(ObjectAccessDrop)); memset(&drop_arg, 0, sizeof(ObjectAccessDrop));
InvokeObjectAccessHook(OAT_DROP, InvokeObjectAccessHook(OAT_DROP,
DatabaseRelationId, db_id, 0, &drop_arg); DatabaseRelationId, db_id, 0, &drop_arg);
@ -831,8 +832,7 @@ dropdb(const char *dbname, bool missing_ok)
ReleaseSysCache(tup); ReleaseSysCache(tup);
/* /*
* Delete any comments or security labels associated with * Delete any comments or security labels associated with the database.
* the database.
*/ */
DeleteSharedComments(db_id, DatabaseRelationId); DeleteSharedComments(db_id, DatabaseRelationId);
DeleteSharedSecurityLabel(db_id, DatabaseRelationId); DeleteSharedSecurityLabel(db_id, DatabaseRelationId);
@ -860,18 +860,18 @@ dropdb(const char *dbname, bool missing_ok)
pgstat_drop_database(db_id); pgstat_drop_database(db_id);
/* /*
* Tell checkpointer to forget any pending fsync and unlink requests for files * Tell checkpointer to forget any pending fsync and unlink requests for
* in the database; else the fsyncs will fail at next checkpoint, or * files in the database; else the fsyncs will fail at next checkpoint, or
* worse, it will delete files that belong to a newly created database * worse, it will delete files that belong to a newly created database
* with the same OID. * with the same OID.
*/ */
ForgetDatabaseFsyncRequests(db_id); ForgetDatabaseFsyncRequests(db_id);
/* /*
* Force a checkpoint to make sure the checkpointer has received the message * Force a checkpoint to make sure the checkpointer has received the
* sent by ForgetDatabaseFsyncRequests. On Windows, this also ensures that * message sent by ForgetDatabaseFsyncRequests. On Windows, this also
* background procs don't hold any open files, which would cause rmdir() to * ensures that background procs don't hold any open files, which would
* fail. * cause rmdir() to fail.
*/ */
RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT); RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT);

View File

@ -231,7 +231,7 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs)
args = strVal(linitial(objargs)); args = strVal(linitial(objargs));
break; break;
default: default:
elog(ERROR, "unexpected object type (%d)", (int)objtype); elog(ERROR, "unexpected object type (%d)", (int) objtype);
break; break;
} }

View File

@ -340,9 +340,9 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es,
if (IsA(utilityStmt, CreateTableAsStmt)) if (IsA(utilityStmt, CreateTableAsStmt))
{ {
/* /*
* We have to rewrite the contained SELECT and then pass it back * We have to rewrite the contained SELECT and then pass it back to
* to ExplainOneQuery. It's probably not really necessary to copy * ExplainOneQuery. It's probably not really necessary to copy the
* the contained parsetree another time, but let's be safe. * contained parsetree another time, but let's be safe.
*/ */
CreateTableAsStmt *ctas = (CreateTableAsStmt *) utilityStmt; CreateTableAsStmt *ctas = (CreateTableAsStmt *) utilityStmt;
List *rewritten; List *rewritten;

View File

@ -890,9 +890,9 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
ReleaseSysCache(languageTuple); ReleaseSysCache(languageTuple);
/* /*
* Only superuser is allowed to create leakproof functions because * Only superuser is allowed to create leakproof functions because it
* it possibly allows unprivileged users to reference invisible tuples * possibly allows unprivileged users to reference invisible tuples to be
* to be filtered out using views for row-level security. * filtered out using views for row-level security.
*/ */
if (isLeakProof && !superuser()) if (isLeakProof && !superuser())
ereport(ERROR, ereport(ERROR,

View File

@ -134,6 +134,7 @@ CheckIndexCompatible(Oid oldId,
/* Caller should already have the relation locked in some way. */ /* Caller should already have the relation locked in some way. */
relationId = RangeVarGetRelid(heapRelation, NoLock, false); relationId = RangeVarGetRelid(heapRelation, NoLock, false);
/* /*
* We can pretend isconstraint = false unconditionally. It only serves to * We can pretend isconstraint = false unconditionally. It only serves to
* decide the text of an error message that should never happen for us. * decide the text of an error message that should never happen for us.
@ -157,10 +158,10 @@ CheckIndexCompatible(Oid oldId,
ReleaseSysCache(tuple); ReleaseSysCache(tuple);
/* /*
* Compute the operator classes, collations, and exclusion operators * Compute the operator classes, collations, and exclusion operators for
* for the new index, so we can test whether it's compatible with the * the new index, so we can test whether it's compatible with the existing
* existing one. Note that ComputeIndexAttrs might fail here, but that's * one. Note that ComputeIndexAttrs might fail here, but that's OK:
* OK: DefineIndex would have called this function with the same arguments * DefineIndex would have called this function with the same arguments
* later on, and it would have failed then anyway. * later on, and it would have failed then anyway.
*/ */
indexInfo = makeNode(IndexInfo); indexInfo = makeNode(IndexInfo);
@ -232,7 +233,8 @@ CheckIndexCompatible(Oid oldId,
/* Any change in exclusion operator selections breaks compatibility. */ /* Any change in exclusion operator selections breaks compatibility. */
if (ret && indexInfo->ii_ExclusionOps != NULL) if (ret && indexInfo->ii_ExclusionOps != NULL)
{ {
Oid *old_operators, *old_procs; Oid *old_operators,
*old_procs;
uint16 *old_strats; uint16 *old_strats;
RelationGetExclusionInfo(irel, &old_operators, &old_procs, &old_strats); RelationGetExclusionInfo(irel, &old_operators, &old_procs, &old_strats);
@ -1778,9 +1780,9 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation,
return; return;
/* /*
* If the relation does exist, check whether it's an index. But note * If the relation does exist, check whether it's an index. But note that
* that the relation might have been dropped between the time we did the * the relation might have been dropped between the time we did the name
* name lookup and now. In that case, there's nothing to do. * lookup and now. In that case, there's nothing to do.
*/ */
relkind = get_rel_relkind(relId); relkind = get_rel_relkind(relId);
if (!relkind) if (!relkind)
@ -1798,9 +1800,9 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation,
if (relId != oldRelId) if (relId != oldRelId)
{ {
/* /*
* Lock level here should match reindex_index() heap lock. * Lock level here should match reindex_index() heap lock. If the OID
 * If the OID isn't valid, it means the index was concurrently dropped, * isn't valid, it means the index was concurrently dropped, which is
* which is not a problem for us; just return normally. * not a problem for us; just return normally.
*/ */
*heapOid = IndexGetRelation(relId, true); *heapOid = IndexGetRelation(relId, true);
if (OidIsValid(*heapOid)) if (OidIsValid(*heapOid))

View File

@ -40,9 +40,9 @@ LockTableCommand(LockStmt *lockstmt)
/* /*
* During recovery we only accept these variations: LOCK TABLE foo IN * During recovery we only accept these variations: LOCK TABLE foo IN
* ACCESS SHARE MODE LOCK TABLE foo IN ROW SHARE MODE LOCK TABLE foo * ACCESS SHARE MODE LOCK TABLE foo IN ROW SHARE MODE LOCK TABLE foo IN
* IN ROW EXCLUSIVE MODE This test must match the restrictions defined * ROW EXCLUSIVE MODE This test must match the restrictions defined in
* in LockAcquire() * LockAcquire()
*/ */
if (lockstmt->mode > RowExclusiveLock) if (lockstmt->mode > RowExclusiveLock)
PreventCommandDuringRecovery("LOCK TABLE"); PreventCommandDuringRecovery("LOCK TABLE");
@ -74,7 +74,7 @@ static void
RangeVarCallbackForLockTable(const RangeVar *rv, Oid relid, Oid oldrelid, RangeVarCallbackForLockTable(const RangeVar *rv, Oid relid, Oid oldrelid,
void *arg) void *arg)
{ {
LOCKMODE lockmode = * (LOCKMODE *) arg; LOCKMODE lockmode = *(LOCKMODE *) arg;
char relkind; char relkind;
AclResult aclresult; AclResult aclresult;
@ -82,7 +82,8 @@ RangeVarCallbackForLockTable(const RangeVar *rv, Oid relid, Oid oldrelid,
return; /* doesn't exist, so no permissions check */ return; /* doesn't exist, so no permissions check */
relkind = get_rel_relkind(relid); relkind = get_rel_relkind(relid);
if (!relkind) if (!relkind)
return; /* woops, concurrently dropped; no permissions check */ return; /* woops, concurrently dropped; no permissions
* check */
/* Currently, we only allow plain tables to be locked */ /* Currently, we only allow plain tables to be locked */
if (relkind != RELKIND_RELATION) if (relkind != RELKIND_RELATION)
@ -122,6 +123,7 @@ LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait)
if (aclresult != ACLCHECK_OK) if (aclresult != ACLCHECK_OK)
{ {
char *relname = get_rel_name(childreloid); char *relname = get_rel_name(childreloid);
if (!relname) if (!relname)
continue; /* child concurrently dropped, just skip it */ continue; /* child concurrently dropped, just skip it */
aclcheck_error(aclresult, ACL_KIND_CLASS, relname); aclcheck_error(aclresult, ACL_KIND_CLASS, relname);
@ -134,6 +136,7 @@ LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait)
{ {
/* try to throw error by name; relation could be deleted... */ /* try to throw error by name; relation could be deleted... */
char *relname = get_rel_name(childreloid); char *relname = get_rel_name(childreloid);
if (!relname) if (!relname)
continue; /* child concurrently dropped, just skip it */ continue; /* child concurrently dropped, just skip it */
ereport(ERROR, ereport(ERROR,
@ -143,8 +146,8 @@ LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait)
} }
/* /*
* Even if we got the lock, child might have been concurrently dropped. * Even if we got the lock, child might have been concurrently
* If so, we can skip it. * dropped. If so, we can skip it.
*/ */
if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(childreloid))) if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(childreloid)))
{ {

View File

@ -244,8 +244,8 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause,
* the OID-determining eflags (PortalStart won't handle them in such a * the OID-determining eflags (PortalStart won't handle them in such a
* case, and for that matter it's not clear the executor will either). * case, and for that matter it's not clear the executor will either).
* *
* For CREATE TABLE ... AS EXECUTE, we also have to ensure that the * For CREATE TABLE ... AS EXECUTE, we also have to ensure that the proper
* proper eflags and fetch count are passed to PortalStart/PortalRun. * eflags and fetch count are passed to PortalStart/PortalRun.
*/ */
if (intoClause) if (intoClause)
{ {

View File

@ -514,12 +514,12 @@ nextval(PG_FUNCTION_ARGS)
sequence = makeRangeVarFromNameList(textToQualifiedNameList(seqin)); sequence = makeRangeVarFromNameList(textToQualifiedNameList(seqin));
/* /*
* XXX: This is not safe in the presence of concurrent DDL, but * XXX: This is not safe in the presence of concurrent DDL, but acquiring
* acquiring a lock here is more expensive than letting nextval_internal * a lock here is more expensive than letting nextval_internal do it,
* do it, since the latter maintains a cache that keeps us from hitting * since the latter maintains a cache that keeps us from hitting the lock
* the lock manager more than once per transaction. It's not clear * manager more than once per transaction. It's not clear whether the
* whether the performance penalty is material in practice, but for now, * performance penalty is material in practice, but for now, we do it this
* we do it this way. * way.
*/ */
relid = RangeVarGetRelid(sequence, NoLock, false); relid = RangeVarGetRelid(sequence, NoLock, false);
@ -1543,9 +1543,9 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record)
* is also used for updating sequences, it's possible that a hot-standby * is also used for updating sequences, it's possible that a hot-standby
* backend is examining the page concurrently; so we mustn't transiently * backend is examining the page concurrently; so we mustn't transiently
* trash the buffer. The solution is to build the correct new page * trash the buffer. The solution is to build the correct new page
* contents in local workspace and then memcpy into the buffer. Then * contents in local workspace and then memcpy into the buffer. Then only
* only bytes that are supposed to change will change, even transiently. * bytes that are supposed to change will change, even transiently. We
* We must palloc the local page for alignment reasons. * must palloc the local page for alignment reasons.
*/ */
localpage = (Page) palloc(BufferGetPageSize(buffer)); localpage = (Page) palloc(BufferGetPageSize(buffer));
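The comment rewrapped above describes a general technique: when a shared page may be examined concurrently, build the new contents in private memory and copy the finished image over in one memcpy, so bytes that are not supposed to change never take on transient garbage values. A minimal standalone sketch of the idea, not PostgreSQL's actual seq_redo code (plain C, with malloc standing in for palloc and a fixed-size byte array standing in for a shared buffer; all names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 8192          /* assumed block size for this sketch */

    /*
     * Rewrite a shared page without ever exposing a half-built image: do all
     * the work on a private copy, then publish it with a single memcpy.
     * Bytes that should not change are overwritten with identical values, so
     * a concurrent reader never sees them move.
     */
    static void
    rewrite_page(unsigned char *shared, const unsigned char *payload, size_t len)
    {
        unsigned char *local;

        if (len > PAGE_SIZE)
            return;                         /* error handling elided */
        local = malloc(PAGE_SIZE);          /* palloc in the real code */
        if (local == NULL)
            return;

        memcpy(local, shared, PAGE_SIZE);   /* start from the current image */
        memcpy(local, payload, len);        /* apply the change privately */
        memcpy(shared, local, PAGE_SIZE);   /* publish the finished page */
        free(local);
    }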

View File

@ -914,9 +914,9 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid,
/* /*
* In DROP INDEX, attempt to acquire lock on the parent table before * In DROP INDEX, attempt to acquire lock on the parent table before
* locking the index. index_drop() will need this anyway, and since * locking the index. index_drop() will need this anyway, and since
* regular queries lock tables before their indexes, we risk deadlock * regular queries lock tables before their indexes, we risk deadlock if
* if we do it the other way around. No error if we don't find a * we do it the other way around. No error if we don't find a pg_index
	 * pg_index entry, though --- the relation may have been dropped.	 * entry, though --- the relation may have been dropped.
*/ */
if (relkind == RELKIND_INDEX && relOid != oldRelOid) if (relkind == RELKIND_INDEX && relOid != oldRelOid)
{ {
@ -2391,7 +2391,11 @@ rename_constraint_internal(Oid myrelid,
else else
{ {
targetrelation = relation_open(myrelid, AccessExclusiveLock); targetrelation = relation_open(myrelid, AccessExclusiveLock);
/* don't tell it whether we're recursing; we allow changing typed tables here */
/*
* don't tell it whether we're recursing; we allow changing typed
* tables here
*/
renameatt_check(myrelid, RelationGetForm(targetrelation), false); renameatt_check(myrelid, RelationGetForm(targetrelation), false);
constraintOid = get_relation_constraint_oid(myrelid, oldconname, false); constraintOid = get_relation_constraint_oid(myrelid, oldconname, false);
@ -2492,7 +2496,7 @@ RenameConstraint(RenameStmt *stmt)
stmt->newname, stmt->newname,
stmt->relation ? interpretInhOption(stmt->relation->inhOpt) : false, /* recursive? */ stmt->relation ? interpretInhOption(stmt->relation->inhOpt) : false, /* recursive? */
false, /* recursing? */ false, /* recursing? */
0 /* expected inhcount */); 0 /* expected inhcount */ );
} }
/* /*
@ -2507,8 +2511,8 @@ RenameRelation(RenameStmt *stmt)
* Grab an exclusive lock on the target table, index, sequence or view, * Grab an exclusive lock on the target table, index, sequence or view,
* which we will NOT release until end of transaction. * which we will NOT release until end of transaction.
* *
* Lock level used here should match RenameRelationInternal, to avoid * Lock level used here should match RenameRelationInternal, to avoid lock
* lock escalation. * escalation.
*/ */
relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock, relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
stmt->missing_ok, false, stmt->missing_ok, false,
@ -2773,22 +2777,22 @@ LOCKMODE
AlterTableGetLockLevel(List *cmds) AlterTableGetLockLevel(List *cmds)
{ {
/* /*
* Late in 9.1 dev cycle a number of issues were uncovered with access * Late in 9.1 dev cycle a number of issues were uncovered with access to
* to catalog relations, leading to the decision to re-enforce all DDL * catalog relations, leading to the decision to re-enforce all DDL at
* at AccessExclusiveLock level by default. * AccessExclusiveLock level by default.
* *
* The issues are that there is a pervasive assumption in the code that * The issues are that there is a pervasive assumption in the code that
* the catalogs will not be read unless an AccessExclusiveLock is held. * the catalogs will not be read unless an AccessExclusiveLock is held. If
* If that rule is relaxed, we must protect against a number of potential * that rule is relaxed, we must protect against a number of potential
* effects - infrequent, but proven possible with test cases where * effects - infrequent, but proven possible with test cases where
* multiple DDL operations occur in a stream against frequently accessed * multiple DDL operations occur in a stream against frequently accessed
* tables. * tables.
* *
* 1. Catalog tables are read using SnapshotNow, which has a race bug * 1. Catalog tables are read using SnapshotNow, which has a race bug that
* that allows a scan to return no valid rows even when one is present * allows a scan to return no valid rows even when one is present in the
* in the case of a commit of a concurrent update of the catalog table. * case of a commit of a concurrent update of the catalog table.
* SnapshotNow also ignores transactions in progress, so takes the * SnapshotNow also ignores transactions in progress, so takes the latest
* latest committed version without waiting for the latest changes. * committed version without waiting for the latest changes.
* *
* 2. Relcache needs to be internally consistent, so unless we lock the * 2. Relcache needs to be internally consistent, so unless we lock the
* definition during reads we have no way to guarantee that. * definition during reads we have no way to guarantee that.
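The rationale above concerns choosing one lock level for an entire ALTER TABLE command list. The underlying rule, take the strongest lock any subcommand needs and use it for the whole statement, can be sketched in a few lines of standalone C; the enum values below are illustrative and are not PostgreSQL's LOCKMODE constants:

    /* Illustrative lock modes, ordered weakest to strongest. */
    enum lockmode
    {
        SHARE_UPDATE_EXCLUSIVE = 1,
        SHARE_ROW_EXCLUSIVE = 2,
        ACCESS_EXCLUSIVE = 3
    };

    /*
     * Return the strongest lock required by any subcommand, so the whole
     * ALTER TABLE runs under a single lock level that is sufficient for
     * every step.  (Per the comment above, after the 9.1 issues the
     * per-subcommand answer defaults to the exclusive level anyway.)
     */
    static enum lockmode
    strongest_lock(const enum lockmode *cmd_locks, int ncmds)
    {
        enum lockmode result = SHARE_UPDATE_EXCLUSIVE;
        int         i;

        for (i = 0; i < ncmds; i++)
            if (cmd_locks[i] > result)
                result = cmd_locks[i];
        return result;
    }

    int
    main(void)
    {
        enum lockmode cmds[] = {SHARE_UPDATE_EXCLUSIVE, ACCESS_EXCLUSIVE};

        return strongest_lock(cmds, 2) == ACCESS_EXCLUSIVE ? 0 : 1;
    }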
@ -4999,8 +5003,8 @@ ATExecColumnDefault(Relation rel, const char *colName,
* safety, but at present we do not expect anything to depend on the * safety, but at present we do not expect anything to depend on the
* default. * default.
* *
* We treat removing the existing default as an internal operation when * We treat removing the existing default as an internal operation when it
* it is preparatory to adding a new default, but as a user-initiated * is preparatory to adding a new default, but as a user-initiated
* operation when the user asked for a drop. * operation when the user asked for a drop.
*/ */
RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, false, RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, false,
@ -5514,6 +5518,7 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
if (OidIsValid(stmt->oldNode)) if (OidIsValid(stmt->oldNode))
{ {
Relation irel = index_open(new_index, NoLock); Relation irel = index_open(new_index, NoLock);
RelationPreserveStorage(irel->rd_node, true); RelationPreserveStorage(irel->rd_node, true);
index_close(irel, NoLock); index_close(irel, NoLock);
} }
@ -6067,8 +6072,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
* operand, revalidate the constraint. For this evaluation, a * operand, revalidate the constraint. For this evaluation, a
* binary coercion cast is equivalent to no cast at all. While * binary coercion cast is equivalent to no cast at all. While
* type implementors should design implicit casts with an eye * type implementors should design implicit casts with an eye
* toward consistency of operations like equality, we cannot assume * toward consistency of operations like equality, we cannot
* here that they have done so. * assume here that they have done so.
* *
* A function with a polymorphic argument could change behavior * A function with a polymorphic argument could change behavior
* arbitrarily in response to get_fn_expr_argtype(). Therefore, * arbitrarily in response to get_fn_expr_argtype(). Therefore,
@ -6091,8 +6096,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
* *
* We need not directly consider the PK type. It's necessarily * We need not directly consider the PK type. It's necessarily
* binary coercible to the opcintype of the unique index column, * binary coercible to the opcintype of the unique index column,
* and ri_triggers.c will only deal with PK datums in terms of that * and ri_triggers.c will only deal with PK datums in terms of
* opcintype. Changing the opcintype also changes pfeqop. * that opcintype. Changing the opcintype also changes pfeqop.
*/ */
old_check_ok = (new_pathtype == old_pathtype && old_check_ok = (new_pathtype == old_pathtype &&
new_castfunc == old_castfunc && new_castfunc == old_castfunc &&
@ -6144,11 +6149,11 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
createForeignKeyTriggers(rel, fkconstraint, constrOid, indexOid); createForeignKeyTriggers(rel, fkconstraint, constrOid, indexOid);
/* /*
* Tell Phase 3 to check that the constraint is satisfied by existing rows. * Tell Phase 3 to check that the constraint is satisfied by existing
* We can skip this during table creation, when requested explicitly by * rows. We can skip this during table creation, when requested explicitly
* specifying NOT VALID in an ADD FOREIGN KEY command, and when we're * by specifying NOT VALID in an ADD FOREIGN KEY command, and when we're
* recreating a constraint following a SET DATA TYPE operation that did not * recreating a constraint following a SET DATA TYPE operation that did
* impugn its validity. * not impugn its validity.
*/ */
if (!old_check_ok && !fkconstraint->skip_validation) if (!old_check_ok && !fkconstraint->skip_validation)
{ {
@ -6236,12 +6241,12 @@ ATExecValidateConstraint(Relation rel, char *constrName, bool recurse,
Relation refrel; Relation refrel;
/* /*
* Triggers are already in place on both tables, so a concurrent write * Triggers are already in place on both tables, so a concurrent
* that alters the result here is not possible. Normally we can run a * write that alters the result here is not possible. Normally we
* query here to do the validation, which would only require * can run a query here to do the validation, which would only
* AccessShareLock. In some cases, it is possible that we might need * require AccessShareLock. In some cases, it is possible that we
* to fire triggers to perform the check, so we take a lock at * might need to fire triggers to perform the check, so we take a
* RowShareLock level just in case. * lock at RowShareLock level just in case.
*/ */
refrel = heap_open(con->confrelid, RowShareLock); refrel = heap_open(con->confrelid, RowShareLock);
@ -6679,10 +6684,11 @@ validateCheckConstraint(Relation rel, HeapTuple constrtup)
constrForm = (Form_pg_constraint) GETSTRUCT(constrtup); constrForm = (Form_pg_constraint) GETSTRUCT(constrtup);
estate = CreateExecutorState(); estate = CreateExecutorState();
/* /*
* XXX this tuple doesn't really come from a syscache, but this doesn't * XXX this tuple doesn't really come from a syscache, but this doesn't
* matter to SysCacheGetAttr, because it only wants to be able to fetch the * matter to SysCacheGetAttr, because it only wants to be able to fetch
* tupdesc * the tupdesc
*/ */
val = SysCacheGetAttr(CONSTROID, constrtup, Anum_pg_constraint_conbin, val = SysCacheGetAttr(CONSTROID, constrtup, Anum_pg_constraint_conbin,
&isnull); &isnull);
@ -7140,8 +7146,7 @@ ATExecDropConstraint(Relation rel, const char *constrName,
{ {
/* /*
* If the child constraint has other definition sources, just * If the child constraint has other definition sources, just
* decrement its inheritance count; if not, recurse to delete * decrement its inheritance count; if not, recurse to delete it.
* it.
*/ */
if (con->coninhcount == 1 && !con->conislocal) if (con->coninhcount == 1 && !con->conislocal)
{ {
@ -7164,9 +7169,9 @@ ATExecDropConstraint(Relation rel, const char *constrName,
else else
{ {
/* /*
* If we were told to drop ONLY in this table (no recursion), * If we were told to drop ONLY in this table (no recursion), we
* we need to mark the inheritors' constraints as locally * need to mark the inheritors' constraints as locally defined
* defined rather than inherited. * rather than inherited.
*/ */
con->coninhcount--; con->coninhcount--;
con->conislocal = true; con->conislocal = true;
@ -8061,6 +8066,7 @@ TryReuseIndex(Oid oldId, IndexStmt *stmt)
stmt->excludeOpNames)) stmt->excludeOpNames))
{ {
Relation irel = index_open(oldId, NoLock); Relation irel = index_open(oldId, NoLock);
stmt->oldNode = irel->rd_node.relNode; stmt->oldNode = irel->rd_node.relNode;
index_close(irel, NoLock); index_close(irel, NoLock);
} }
@ -10267,9 +10273,9 @@ RangeVarCallbackOwnsTable(const RangeVar *relation,
return; return;
/* /*
* If the relation does exist, check whether it's an index. But note * If the relation does exist, check whether it's an index. But note that
* that the relation might have been dropped between the time we did the * the relation might have been dropped between the time we did the name
* name lookup and now. In that case, there's nothing to do. * lookup and now. In that case, there's nothing to do.
*/ */
relkind = get_rel_relkind(relId); relkind = get_rel_relkind(relId);
if (!relkind) if (!relkind)
@ -10333,6 +10339,7 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
} }
else if (IsA(stmt, AlterObjectSchemaStmt)) else if (IsA(stmt, AlterObjectSchemaStmt))
reltype = ((AlterObjectSchemaStmt *) stmt)->objectType; reltype = ((AlterObjectSchemaStmt *) stmt)->objectType;
else if (IsA(stmt, AlterTableStmt)) else if (IsA(stmt, AlterTableStmt))
reltype = ((AlterTableStmt *) stmt)->relkind; reltype = ((AlterTableStmt *) stmt)->relkind;
else else
@ -10342,11 +10349,11 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
} }
/* /*
* For compatibility with prior releases, we allow ALTER TABLE to be * For compatibility with prior releases, we allow ALTER TABLE to be used
* used with most other types of relations (but not composite types). * with most other types of relations (but not composite types). We allow
* We allow similar flexibility for ALTER INDEX in the case of RENAME, * similar flexibility for ALTER INDEX in the case of RENAME, but not
* but not otherwise. Otherwise, the user must select the correct form * otherwise. Otherwise, the user must select the correct form of the
* of the command for the relation at issue. * command for the relation at issue.
*/ */
if (reltype == OBJECT_SEQUENCE && relkind != RELKIND_SEQUENCE) if (reltype == OBJECT_SEQUENCE && relkind != RELKIND_SEQUENCE)
ereport(ERROR, ereport(ERROR,
@ -10391,10 +10398,10 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
errhint("Use ALTER FOREIGN TABLE instead."))); errhint("Use ALTER FOREIGN TABLE instead.")));
/* /*
* Don't allow ALTER TABLE .. SET SCHEMA on relations that can't be * Don't allow ALTER TABLE .. SET SCHEMA on relations that can't be moved
* moved to a different schema, such as indexes and TOAST tables. * to a different schema, such as indexes and TOAST tables.
*/ */
if (IsA(stmt, AlterObjectSchemaStmt) && relkind != RELKIND_RELATION if (IsA(stmt, AlterObjectSchemaStmt) &&relkind != RELKIND_RELATION
&& relkind != RELKIND_VIEW && relkind != RELKIND_SEQUENCE && relkind != RELKIND_VIEW && relkind != RELKIND_SEQUENCE
&& relkind != RELKIND_FOREIGN_TABLE) && relkind != RELKIND_FOREIGN_TABLE)
ereport(ERROR, ereport(ERROR,

View File

@ -438,6 +438,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
if (object_access_hook) if (object_access_hook)
{ {
ObjectAccessDrop drop_arg; ObjectAccessDrop drop_arg;
memset(&drop_arg, 0, sizeof(ObjectAccessDrop)); memset(&drop_arg, 0, sizeof(ObjectAccessDrop));
InvokeObjectAccessHook(OAT_DROP, TableSpaceRelationId, InvokeObjectAccessHook(OAT_DROP, TableSpaceRelationId,
tablespaceoid, 0, &drop_arg); tablespaceoid, 0, &drop_arg);
@ -1204,9 +1205,9 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source)
* DATABASE SET or ALTER USER SET command. pg_dumpall dumps all * DATABASE SET or ALTER USER SET command. pg_dumpall dumps all
* roles before tablespaces, so if we're restoring a pg_dumpall * roles before tablespaces, so if we're restoring a pg_dumpall
* script the tablespace might not yet exist, but will be created * script the tablespace might not yet exist, but will be created
* later. Because of that, issue a NOTICE if source == PGC_S_TEST, * later. Because of that, issue a NOTICE if source ==
* but accept the value anyway. Otherwise, silently ignore any * PGC_S_TEST, but accept the value anyway. Otherwise, silently
* bad list elements. * ignore any bad list elements.
*/ */
curoid = get_tablespace_oid(curname, source <= PGC_S_TEST); curoid = get_tablespace_oid(curname, source <= PGC_S_TEST);
if (curoid == InvalidOid) if (curoid == InvalidOid)
@ -1493,10 +1494,10 @@ tblspc_redo(XLogRecPtr lsn, XLogRecord *record)
* files then do conflict processing and try again, if currently * files then do conflict processing and try again, if currently
* enabled. * enabled.
* *
* Other possible reasons for failure include bollixed file permissions * Other possible reasons for failure include bollixed file
* on a standby server when they were okay on the primary, etc etc. * permissions on a standby server when they were okay on the primary,
* There's not much we can do about that, so just remove what we can * etc etc. There's not much we can do about that, so just remove what
* and press on. * we can and press on.
*/ */
if (!destroy_tablespace_directories(xlrec->ts_id, true)) if (!destroy_tablespace_directories(xlrec->ts_id, true))
{ {

View File

@ -199,8 +199,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
/* /*
* We must take a lock on the target relation to protect against * We must take a lock on the target relation to protect against
* concurrent drop. It's not clear that AccessShareLock is strong * concurrent drop. It's not clear that AccessShareLock is strong
* enough, but we certainly need at least that much... otherwise, * enough, but we certainly need at least that much... otherwise, we
* we might end up creating a pg_constraint entry referencing a * might end up creating a pg_constraint entry referencing a
* nonexistent table. * nonexistent table.
*/ */
constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock, false); constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock, false);
@ -494,8 +494,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
* can skip this for internally generated triggers, since the name * can skip this for internally generated triggers, since the name
* modification above should be sufficient. * modification above should be sufficient.
* *
* NOTE that this is cool only because we have AccessExclusiveLock on * NOTE that this is cool only because we have AccessExclusiveLock on the
* the relation, so the trigger set won't be changing underneath us. * relation, so the trigger set won't be changing underneath us.
*/ */
if (!isInternal) if (!isInternal)
{ {

View File

@ -1484,7 +1484,7 @@ static void
makeRangeConstructors(const char *name, Oid namespace, makeRangeConstructors(const char *name, Oid namespace,
Oid rangeOid, Oid subtype) Oid rangeOid, Oid subtype)
{ {
static const char * const prosrc[2] = {"range_constructor2", static const char *const prosrc[2] = {"range_constructor2",
"range_constructor3"}; "range_constructor3"};
static const int pronargs[2] = {2, 3}; static const int pronargs[2] = {2, 3};

View File

@ -937,6 +937,7 @@ DropRole(DropRoleStmt *stmt)
if (object_access_hook) if (object_access_hook)
{ {
ObjectAccessDrop drop_arg; ObjectAccessDrop drop_arg;
memset(&drop_arg, 0, sizeof(ObjectAccessDrop)); memset(&drop_arg, 0, sizeof(ObjectAccessDrop));
InvokeObjectAccessHook(OAT_DROP, InvokeObjectAccessHook(OAT_DROP,
AuthIdRelationId, roleid, 0, &drop_arg); AuthIdRelationId, roleid, 0, &drop_arg);

View File

@ -322,13 +322,13 @@ get_rel_oids(Oid relid, const RangeVar *vacrel)
Oid relid; Oid relid;
/* /*
* Since we don't take a lock here, the relation might be gone, * Since we don't take a lock here, the relation might be gone, or the
* or the RangeVar might no longer refer to the OID we look up * RangeVar might no longer refer to the OID we look up here. In the
* here. In the former case, VACUUM will do nothing; in the * former case, VACUUM will do nothing; in the latter case, it will
* latter case, it will process the OID we looked up here, rather * process the OID we looked up here, rather than the new one.
* than the new one. Neither is ideal, but there's little practical * Neither is ideal, but there's little practical alternative, since
* alternative, since we're going to commit this transaction and * we're going to commit this transaction and begin a new one between
* begin a new one between now and then. * now and then.
*/ */
relid = RangeVarGetRelid(vacrel, NoLock, false); relid = RangeVarGetRelid(vacrel, NoLock, false);

View File

@ -222,17 +222,17 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
* *
* A corner case here is that if we scanned no pages at all because every * A corner case here is that if we scanned no pages at all because every
* page is all-visible, we should not update relpages/reltuples, because * page is all-visible, we should not update relpages/reltuples, because
* we have no new information to contribute. In particular this keeps * we have no new information to contribute. In particular this keeps us
* us from replacing relpages=reltuples=0 (which means "unknown tuple * from replacing relpages=reltuples=0 (which means "unknown tuple
* density") with nonzero relpages and reltuples=0 (which means "zero * density") with nonzero relpages and reltuples=0 (which means "zero
* tuple density") unless there's some actual evidence for the latter. * tuple density") unless there's some actual evidence for the latter.
* *
* We do update relallvisible even in the corner case, since if the * We do update relallvisible even in the corner case, since if the table
* table is all-visible we'd definitely like to know that. But clamp * is all-visible we'd definitely like to know that. But clamp the value
* the value to be not more than what we're setting relpages to. * to be not more than what we're setting relpages to.
* *
* Also, don't change relfrozenxid if we skipped any pages, since then * Also, don't change relfrozenxid if we skipped any pages, since then we
* we don't know for certain that all tuples have a newer xmin. * don't know for certain that all tuples have a newer xmin.
*/ */
new_rel_pages = vacrelstats->rel_pages; new_rel_pages = vacrelstats->rel_pages;
new_rel_tuples = vacrelstats->new_rel_tuples; new_rel_tuples = vacrelstats->new_rel_tuples;
@ -277,9 +277,9 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
write_rate = 0; write_rate = 0;
if ((secs > 0) || (usecs > 0)) if ((secs > 0) || (usecs > 0))
{ {
read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) / read_rate = (double) BLCKSZ *VacuumPageMiss / (1024 * 1024) /
(secs + usecs / 1000000.0); (secs + usecs / 1000000.0);
write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) / write_rate = (double) BLCKSZ *VacuumPageDirty / (1024 * 1024) /
(secs + usecs / 1000000.0); (secs + usecs / 1000000.0);
} }
ereport(LOG, ereport(LOG,
@ -300,7 +300,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
VacuumPageHit, VacuumPageHit,
VacuumPageMiss, VacuumPageMiss,
VacuumPageDirty, VacuumPageDirty,
read_rate,write_rate, read_rate, write_rate,
pg_rusage_show(&ru0)))); pg_rusage_show(&ru0))));
} }
} }
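The read_rate and write_rate expressions just above convert buffer-page counts into MB/s. A self-contained worked example of the same arithmetic, assuming the default BLCKSZ of 8192 bytes (the counts and timings below are made up for illustration):

    #include <stdio.h>

    #define BLCKSZ 8192             /* assumed default PostgreSQL block size */

    int
    main(void)
    {
        long    page_misses = 2560; /* pages read from disk */
        long    page_dirty = 1280;  /* pages dirtied */
        long    secs = 4;
        long    usecs = 500000;     /* elapsed time: 4.5 s */
        double  elapsed = secs + usecs / 1000000.0;

        /* bytes -> MiB, then divide by elapsed seconds */
        double  read_rate = (double) BLCKSZ * page_misses / (1024 * 1024) / elapsed;
        double  write_rate = (double) BLCKSZ * page_dirty / (1024 * 1024) / elapsed;

        /* 2560 * 8192 bytes = 20 MiB over 4.5 s, about 4.444 MB/s read */
        printf("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n",
               read_rate, write_rate);
        return 0;
    }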
@ -501,10 +501,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
vacrelstats->num_dead_tuples > 0) vacrelstats->num_dead_tuples > 0)
{ {
/* /*
* Before beginning index vacuuming, we release any pin we may hold * Before beginning index vacuuming, we release any pin we may
* on the visibility map page. This isn't necessary for correctness, * hold on the visibility map page. This isn't necessary for
* but we do it anyway to avoid holding the pin across a lengthy, * correctness, but we do it anyway to avoid holding the pin
* unrelated operation. * across a lengthy, unrelated operation.
*/ */
if (BufferIsValid(vmbuffer)) if (BufferIsValid(vmbuffer))
{ {
@ -535,10 +535,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/* /*
* Pin the visibility map page in case we need to mark the page * Pin the visibility map page in case we need to mark the page
* all-visible. In most cases this will be very cheap, because we'll * all-visible. In most cases this will be very cheap, because we'll
* already have the correct page pinned anyway. However, it's possible * already have the correct page pinned anyway. However, it's
* that (a) next_not_all_visible_block is covered by a different VM page * possible that (a) next_not_all_visible_block is covered by a
* than the current block or (b) we released our pin and did a cycle of * different VM page than the current block or (b) we released our pin
* index vacuuming. * and did a cycle of index vacuuming.
*/ */
visibilitymap_pin(onerel, blkno, &vmbuffer); visibilitymap_pin(onerel, blkno, &vmbuffer);
@ -873,10 +873,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
else if (!all_visible_according_to_vm) else if (!all_visible_according_to_vm)
{ {
/* /*
* It should never be the case that the visibility map page * It should never be the case that the visibility map page is
* is set while the page-level bit is clear, but the reverse * set while the page-level bit is clear, but the reverse is
* is allowed. Set the visibility map bit as well so that * allowed. Set the visibility map bit as well so that we get
* we get back in sync. * back in sync.
*/ */
visibilitymap_set(onerel, blkno, InvalidXLogRecPtr, vmbuffer, visibilitymap_set(onerel, blkno, InvalidXLogRecPtr, vmbuffer,
visibility_cutoff_xid); visibility_cutoff_xid);

View File

@ -204,8 +204,8 @@ DefineVirtualRelation(RangeVar *relation, List *tlist, bool replace,
checkViewTupleDesc(descriptor, rel->rd_att); checkViewTupleDesc(descriptor, rel->rd_att);
/* /*
* The new options list replaces the existing options list, even * The new options list replaces the existing options list, even if
* if it's empty. * it's empty.
*/ */
atcmd = makeNode(AlterTableCmd); atcmd = makeNode(AlterTableCmd);
atcmd->subtype = AT_ReplaceRelOptions; atcmd->subtype = AT_ReplaceRelOptions;

View File

@ -66,6 +66,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
TIDBitmap *tbm; TIDBitmap *tbm;
TBMIterator *tbmiterator; TBMIterator *tbmiterator;
TBMIterateResult *tbmres; TBMIterateResult *tbmres;
#ifdef USE_PREFETCH #ifdef USE_PREFETCH
TBMIterator *prefetch_iterator; TBMIterator *prefetch_iterator;
#endif #endif

View File

@ -176,8 +176,8 @@ StoreIndexTuple(TupleTableSlot *slot, IndexTuple itup, TupleDesc itupdesc)
* Note: we must use the tupdesc supplied by the AM in index_getattr, not * Note: we must use the tupdesc supplied by the AM in index_getattr, not
* the slot's tupdesc, in case the latter has different datatypes (this * the slot's tupdesc, in case the latter has different datatypes (this
* happens for btree name_ops in particular). They'd better have the same * happens for btree name_ops in particular). They'd better have the same
* number of columns though, as well as being datatype-compatible which * number of columns though, as well as being datatype-compatible which is
* is something we can't so easily check. * something we can't so easily check.
*/ */
Assert(slot->tts_tupleDescriptor->natts == nindexatts); Assert(slot->tts_tupleDescriptor->natts == nindexatts);

View File

@ -419,8 +419,8 @@ MJCompare(MergeJoinState *mergestate)
/* /*
* If we had any NULL-vs-NULL inputs, we do not want to report that the * If we had any NULL-vs-NULL inputs, we do not want to report that the
* tuples are equal. Instead, if result is still 0, change it to +1. * tuples are equal. Instead, if result is still 0, change it to +1. This
* This will result in advancing the inner side of the join. * will result in advancing the inner side of the join.
* *
* Likewise, if there was a constant-false joinqual, do not report * Likewise, if there was a constant-false joinqual, do not report
* equality. We have to check this as part of the mergequals, else the * equality. We have to check this as part of the mergequals, else the

View File

@ -950,8 +950,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
* If there are indices on the result relation, open them and save * If there are indices on the result relation, open them and save
* descriptors in the result relation info, so that we can add new * descriptors in the result relation info, so that we can add new
* index entries for the tuples we add/update. We need not do this * index entries for the tuples we add/update. We need not do this
* for a DELETE, however, since deletion doesn't affect indexes. * for a DELETE, however, since deletion doesn't affect indexes. Also,
* Also, inside an EvalPlanQual operation, the indexes might be open * inside an EvalPlanQual operation, the indexes might be open
* already, since we share the resultrel state with the original * already, since we share the resultrel state with the original
* query. * query.
*/ */

View File

@ -1674,8 +1674,8 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan, ParamListInfo boundParams)
raw_parsetree_list = pg_parse_query(src); raw_parsetree_list = pg_parse_query(src);
/* /*
* Do parse analysis and rule rewrite for each raw parsetree, storing * Do parse analysis and rule rewrite for each raw parsetree, storing the
* the results into unsaved plancache entries. * results into unsaved plancache entries.
*/ */
plancache_list = NIL; plancache_list = NIL;
@ -1686,8 +1686,8 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan, ParamListInfo boundParams)
CachedPlanSource *plansource; CachedPlanSource *plansource;
/* /*
* Create the CachedPlanSource before we do parse analysis, since * Create the CachedPlanSource before we do parse analysis, since it
* it needs to see the unmodified raw parse tree. * needs to see the unmodified raw parse tree.
*/ */
plansource = CreateCachedPlan(parsetree, plansource = CreateCachedPlan(parsetree,
src, src,
@ -2335,9 +2335,9 @@ _SPI_make_plan_non_temp(SPIPlanPtr plan)
/* /*
* Reparent all the CachedPlanSources into the procedure context. In * Reparent all the CachedPlanSources into the procedure context. In
* theory this could fail partway through due to the pallocs, but we * theory this could fail partway through due to the pallocs, but we don't
* don't care too much since both the procedure context and the executor * care too much since both the procedure context and the executor context
* context would go away on error. * would go away on error.
*/ */
foreach(lc, plan->plancache_list) foreach(lc, plan->plancache_list)
{ {

View File

@ -316,8 +316,8 @@ ClientAuthentication(Port *port)
/* /*
* Get the authentication method to use for this frontend/database * Get the authentication method to use for this frontend/database
* combination. Note: we do not parse the file at this point; this has * combination. Note: we do not parse the file at this point; this has
* already been done elsewhere. hba.c dropped an error message * already been done elsewhere. hba.c dropped an error message into the
* into the server logfile if parsing the hba config file failed. * server logfile if parsing the hba config file failed.
*/ */
hba_getauthmethod(port); hba_getauthmethod(port);
@ -1365,10 +1365,10 @@ pg_SSPI_recvauth(Port *port)
} }
/* /*
* Overwrite the current context with the one we just received. * Overwrite the current context with the one we just received. If
* If sspictx is NULL it was the first loop and we need to allocate * sspictx is NULL it was the first loop and we need to allocate a
* a buffer for it. On subsequent runs, we can just overwrite the * buffer for it. On subsequent runs, we can just overwrite the buffer
* buffer contents since the size does not change. * contents since the size does not change.
*/ */
if (sspictx == NULL) if (sspictx == NULL)
{ {

View File

@ -845,8 +845,8 @@ initialize_SSL(void)
{ {
/* /*
* Always ask for SSL client cert, but don't fail if it's not * Always ask for SSL client cert, but don't fail if it's not
* presented. We might fail such connections later, depending on * presented. We might fail such connections later, depending on what
* what we find in pg_hba.conf. * we find in pg_hba.conf.
*/ */
SSL_CTX_set_verify(SSL_context, SSL_CTX_set_verify(SSL_context,
(SSL_VERIFY_PEER | (SSL_VERIFY_PEER |

View File

@ -443,10 +443,9 @@ is_member(Oid userid, const char *role)
return false; /* if target role not exist, say "no" */ return false; /* if target role not exist, say "no" */
/* /*
* See if user is directly or indirectly a member of role. * See if user is directly or indirectly a member of role. For this
* For this purpose, a superuser is not considered to be automatically * purpose, a superuser is not considered to be automatically a member of
* a member of the role, so group auth only applies to explicit * the role, so group auth only applies to explicit membership.
* membership.
*/ */
return is_member_of_role_nosuper(userid, roleid); return is_member_of_role_nosuper(userid, roleid);
} }
@ -1293,6 +1292,7 @@ parse_hba_line(List *line, int line_num)
foreach(tokencell, tokens) foreach(tokencell, tokens)
{ {
char *val; char *val;
token = lfirst(tokencell); token = lfirst(tokencell);
str = pstrdup(token->string); str = pstrdup(token->string);
@ -1397,9 +1397,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
else if (strcmp(name, "clientcert") == 0) else if (strcmp(name, "clientcert") == 0)
{ {
/* /*
* Since we require ctHostSSL, this really can never happen * Since we require ctHostSSL, this really can never happen on
* on non-SSL-enabled builds, so don't bother checking for * non-SSL-enabled builds, so don't bother checking for USE_SSL.
* USE_SSL.
*/ */
if (hbaline->conntype != ctHostSSL) if (hbaline->conntype != ctHostSSL)
{ {
@ -1742,8 +1741,8 @@ load_hba(void)
{ {
/* /*
* Parse error in the file, so indicate there's a problem. NB: a * Parse error in the file, so indicate there's a problem. NB: a
* problem in a line will free the memory for all previous lines as * problem in a line will free the memory for all previous lines
* well! * as well!
*/ */
MemoryContextReset(hbacxt); MemoryContextReset(hbacxt);
new_parsed_lines = NIL; new_parsed_lines = NIL;
@ -1761,9 +1760,9 @@ load_hba(void)
} }
/* /*
* A valid HBA file must have at least one entry; else there's no way * A valid HBA file must have at least one entry; else there's no way to
* to connect to the postmaster. But only complain about this if we * connect to the postmaster. But only complain about this if we didn't
* didn't already have parsing errors. * already have parsing errors.
*/ */
if (ok && new_parsed_lines == NIL) if (ok && new_parsed_lines == NIL)
{ {

View File

@ -1247,9 +1247,9 @@ internal_flush(void)
/* /*
* We drop the buffered data anyway so that processing can * We drop the buffered data anyway so that processing can
* continue, even though we'll probably quit soon. We also * continue, even though we'll probably quit soon. We also set a
* set a flag that'll cause the next CHECK_FOR_INTERRUPTS * flag that'll cause the next CHECK_FOR_INTERRUPTS to terminate
* to terminate the connection. * the connection.
*/ */
PqSendStart = PqSendPointer = 0; PqSendStart = PqSendPointer = 0;
ClientConnectionLost = 1; ClientConnectionLost = 1;
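The comment above captures a pattern for failures deep in the output path: discard the buffered data so the caller can keep going, and set a flag that the next safe interrupt-check point turns into session termination, rather than reporting the error from inside the failing send path. A hedged standalone sketch of that shape (lowercase names below are stand-ins, not the real backend symbols):

    #include <stdio.h>

    /* Stand-ins for the real send-buffer pointers and interrupt flag. */
    static int  send_start, send_pointer;
    static volatile int connection_lost;

    static void
    flush_failed(void)
    {
        /*
         * Drop whatever is buffered so processing can continue, and leave a
         * flag for the next safe checkpoint to act on; erroring out right
         * here could recurse into the same failing output path.
         */
        send_start = send_pointer = 0;
        connection_lost = 1;
    }

    static void
    check_for_interrupts(void)
    {
        if (connection_lost)
            fprintf(stderr, "connection to client lost\n");
        /* a real server would abort the session here */
    }

    int
    main(void)
    {
        flush_failed();
        check_for_interrupts();
        return 0;
    }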

View File

@ -992,14 +992,14 @@ tbm_lossify(TIDBitmap *tbm)
} }
/* /*
* With a big bitmap and small work_mem, it's possible that we cannot * With a big bitmap and small work_mem, it's possible that we cannot get
* get under maxentries. Again, if that happens, we'd end up uselessly * under maxentries. Again, if that happens, we'd end up uselessly
* calling tbm_lossify over and over. To prevent this from becoming a * calling tbm_lossify over and over. To prevent this from becoming a
* performance sink, force maxentries up to at least double the current * performance sink, force maxentries up to at least double the current
* number of entries. (In essence, we're admitting inability to fit * number of entries. (In essence, we're admitting inability to fit
* within work_mem when we do this.) Note that this test will not fire * within work_mem when we do this.) Note that this test will not fire if
* if we broke out of the loop early; and if we didn't, the current * we broke out of the loop early; and if we didn't, the current number of
* number of entries is simply not reducible any further. * entries is simply not reducible any further.
*/ */
if (tbm->nentries > tbm->maxentries / 2) if (tbm->nentries > tbm->maxentries / 2)
tbm->maxentries = Min(tbm->nentries, (INT_MAX - 1) / 2) * 2; tbm->maxentries = Min(tbm->nentries, (INT_MAX - 1) / 2) * 2;
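The clamp on the line above raises the entry limit to at least double the current count while keeping it representable: capping the operand at (INT_MAX - 1) / 2 before multiplying by 2 means the product cannot overflow a signed int. A standalone restatement with Min() spelled out (the function name is illustrative):

    #include <limits.h>

    /*
     * Raise the entry limit to at least twice the current entry count.  The
     * operand is capped at (INT_MAX - 1) / 2 so the multiplication by 2
     * stays within the range of a signed int.
     */
    static int
    raise_limit(int nentries, int maxentries)
    {
        if (nentries > maxentries / 2)
        {
            int     cap = (INT_MAX - 1) / 2;

            maxentries = (nentries < cap ? nentries : cap) * 2;
        }
        return maxentries;
    }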
@ -1011,8 +1011,8 @@ tbm_lossify(TIDBitmap *tbm)
static int static int
tbm_comparator(const void *left, const void *right) tbm_comparator(const void *left, const void *right)
{ {
BlockNumber l = (*((PagetableEntry * const *) left))->blockno; BlockNumber l = (*((PagetableEntry *const *) left))->blockno;
BlockNumber r = (*((PagetableEntry * const *) right))->blockno; BlockNumber r = (*((PagetableEntry *const *) right))->blockno;
if (l < r) if (l < r)
return -1; return -1;
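tbm_comparator above is a textbook qsort(3)-style comparator over an array of pointers, ordering entries by block number. A self-contained version using a stand-in struct (Entry is hypothetical; the real element type is PagetableEntry):

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned int BlockNumber;

    typedef struct
    {
        BlockNumber blockno;        /* stand-in for PagetableEntry */
    } Entry;

    /* qsort comparator over an array of Entry pointers, ordered by blockno. */
    static int
    entry_cmp(const void *left, const void *right)
    {
        BlockNumber l = (*(Entry *const *) left)->blockno;
        BlockNumber r = (*(Entry *const *) right)->blockno;

        if (l < r)
            return -1;
        if (l > r)
            return 1;
        return 0;
    }

    int
    main(void)
    {
        Entry   a = {42}, b = {7}, c = {19};
        Entry  *items[] = {&a, &b, &c};
        int     i;

        qsort(items, 3, sizeof(Entry *), entry_cmp);
        for (i = 0; i < 3; i++)
            printf("%u\n", items[i]->blockno);      /* prints 7 19 42 */
        return 0;
    }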

View File

@ -65,8 +65,8 @@ geqo_selection(PlannerInfo *root, Chromosome *momma, Chromosome *daddy,
* one, when we can't. * one, when we can't.
* *
* This code was observed to hang up in an infinite loop when the * This code was observed to hang up in an infinite loop when the
* platform's implementation of erand48() was broken. We now always * platform's implementation of erand48() was broken. We now always use
* use our own version. * our own version.
*/ */
if (pool->size > 1) if (pool->size > 1)
{ {

View File

@ -251,6 +251,7 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel,
} }
break; break;
case RTE_SUBQUERY: case RTE_SUBQUERY:
/* /*
* Subqueries don't support parameterized paths, so just go * Subqueries don't support parameterized paths, so just go
* ahead and build their paths immediately. * ahead and build their paths immediately.
@ -264,6 +265,7 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel,
set_values_size_estimates(root, rel); set_values_size_estimates(root, rel);
break; break;
case RTE_CTE: case RTE_CTE:
/* /*
* CTEs don't support parameterized paths, so just go ahead * CTEs don't support parameterized paths, so just go ahead
* and build their paths immediately. * and build their paths immediately.
@ -574,8 +576,8 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
/* /*
* It is possible that constraint exclusion detected a contradiction * It is possible that constraint exclusion detected a contradiction
* within a child subquery, even though we didn't prove one above. * within a child subquery, even though we didn't prove one above. If
* If so, we can skip this child. * so, we can skip this child.
*/ */
if (IS_DUMMY_REL(childrel)) if (IS_DUMMY_REL(childrel))
continue; continue;
@ -1115,9 +1117,9 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
rel->subroot = subroot; rel->subroot = subroot;
/* /*
* It's possible that constraint exclusion proved the subquery empty. * It's possible that constraint exclusion proved the subquery empty. If
* If so, it's convenient to turn it back into a dummy path so that we * so, it's convenient to turn it back into a dummy path so that we will
* will recognize appropriate optimizations at this level. * recognize appropriate optimizations at this level.
*/ */
if (is_dummy_plan(rel->subplan)) if (is_dummy_plan(rel->subplan))
{ {

View File

@ -3221,8 +3221,8 @@ compute_semi_anti_join_factors(PlannerInfo *root,
* *
* Note: it is correct to use the inner rel's "rows" count here, even * Note: it is correct to use the inner rel's "rows" count here, even
* though we might later be considering a parameterized inner path with * though we might later be considering a parameterized inner path with
* fewer rows. This is because we have included all the join clauses * fewer rows. This is because we have included all the join clauses in
* in the selectivity estimate. * the selectivity estimate.
*/ */
if (jselec > 0) /* protect against zero divide */ if (jselec > 0) /* protect against zero divide */
{ {
@ -3282,6 +3282,7 @@ has_indexed_join_quals(NestPath *joinpath)
break; break;
} }
default: default:
/* /*
* If it's not a simple indexscan, it probably doesn't run quickly * If it's not a simple indexscan, it probably doesn't run quickly
* for zero rows out, even if it's a parameterized path using all * for zero rows out, even if it's a parameterized path using all
@ -3293,8 +3294,8 @@ has_indexed_join_quals(NestPath *joinpath)
/* /*
* Examine the inner path's param clauses. Any that are from the outer * Examine the inner path's param clauses. Any that are from the outer
* path must be found in the indexclauses list, either exactly or in an * path must be found in the indexclauses list, either exactly or in an
* equivalent form generated by equivclass.c. Also, we must find at * equivalent form generated by equivclass.c. Also, we must find at least
* least one such clause, else it's a clauseless join which isn't fast. * one such clause, else it's a clauseless join which isn't fast.
*/ */
found_one = false; found_one = false;
foreach(lc, innerpath->param_info->ppi_clauses) foreach(lc, innerpath->param_info->ppi_clauses)

View File

@ -1217,9 +1217,9 @@ generate_join_implied_equalities_broken(PlannerInfo *root,
/* /*
* If we have to translate, just brute-force apply adjust_appendrel_attrs * If we have to translate, just brute-force apply adjust_appendrel_attrs
* to all the RestrictInfos at once. This will result in returning * to all the RestrictInfos at once. This will result in returning
* RestrictInfos that are not listed in ec_derives, but there shouldn't * RestrictInfos that are not listed in ec_derives, but there shouldn't be
* be any duplication, and it's a sufficiently narrow corner case that * any duplication, and it's a sufficiently narrow corner case that we
* we shouldn't sweat too much over it anyway. * shouldn't sweat too much over it anyway.
*/ */
if (inner_appinfo) if (inner_appinfo)
result = (List *) adjust_appendrel_attrs(root, (Node *) result, result = (List *) adjust_appendrel_attrs(root, (Node *) result,

View File

@ -246,24 +246,24 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
/* /*
* Identify the join clauses that can match the index. For the moment * Identify the join clauses that can match the index. For the moment
* we keep them separate from the restriction clauses. Note that * we keep them separate from the restriction clauses. Note that this
* this finds only "loose" join clauses that have not been merged * finds only "loose" join clauses that have not been merged into
* into EquivalenceClasses. Also, collect join OR clauses for later. * EquivalenceClasses. Also, collect join OR clauses for later.
*/ */
MemSet(&jclauseset, 0, sizeof(jclauseset)); MemSet(&jclauseset, 0, sizeof(jclauseset));
match_join_clauses_to_index(root, rel, index, match_join_clauses_to_index(root, rel, index,
&jclauseset, &joinorclauses); &jclauseset, &joinorclauses);
/* /*
* Look for EquivalenceClasses that can generate joinclauses * Look for EquivalenceClasses that can generate joinclauses matching
* matching the index. * the index.
*/ */
MemSet(&eclauseset, 0, sizeof(eclauseset)); MemSet(&eclauseset, 0, sizeof(eclauseset));
match_eclass_clauses_to_index(root, index, &eclauseset); match_eclass_clauses_to_index(root, index, &eclauseset);
/* /*
* If we found any plain or eclass join clauses, decide what to * If we found any plain or eclass join clauses, decide what to do
* do with 'em. * with 'em.
*/ */
if (jclauseset.nonempty || eclauseset.nonempty) if (jclauseset.nonempty || eclauseset.nonempty)
consider_index_join_clauses(root, rel, index, consider_index_join_clauses(root, rel, index,
@ -366,19 +366,19 @@ consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel,
* We can always include any restriction clauses in the index clauses. * We can always include any restriction clauses in the index clauses.
* However, it's not obvious which subsets of the join clauses are worth * However, it's not obvious which subsets of the join clauses are worth
* generating paths from, and it's unlikely that considering every * generating paths from, and it's unlikely that considering every
* possible subset is worth the cycles. Our current heuristic is based * possible subset is worth the cycles. Our current heuristic is based on
* on the index columns, with the idea that later index columns are less * the index columns, with the idea that later index columns are less
* useful than earlier ones; therefore it's unlikely to be worth trying * useful than earlier ones; therefore it's unlikely to be worth trying
* combinations that would remove a clause from an earlier index column * combinations that would remove a clause from an earlier index column
* while adding one to a later column. Also, we know that all the * while adding one to a later column. Also, we know that all the eclass
* eclass clauses for a particular column are redundant, so we should * clauses for a particular column are redundant, so we should use only
* use only one of them. However, eclass clauses will always represent * one of them. However, eclass clauses will always represent equality
* equality which is the strongest type of index constraint, so those * which is the strongest type of index constraint, so those are
* are high-value and we should try every available combination when we * high-value and we should try every available combination when we have
* have eclass clauses for more than one column. Furthermore, it's * eclass clauses for more than one column. Furthermore, it's unlikely to
* unlikely to be useful to combine an eclass clause with non-eclass * be useful to combine an eclass clause with non-eclass clauses for the
* clauses for the same index column. These considerations lead to the * same index column. These considerations lead to the following
* following heuristics: * heuristics:
* *
* First, start with the restriction clauses, and add on all simple join * First, start with the restriction clauses, and add on all simple join
* clauses for column 1. If there are any such join clauses, generate * clauses for column 1. If there are any such join clauses, generate
@ -542,17 +542,16 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
SAOP_PER_AM, ST_ANYSCAN); SAOP_PER_AM, ST_ANYSCAN);
/* /*
* Submit all the ones that can form plain IndexScan plans to add_path. * Submit all the ones that can form plain IndexScan plans to add_path. (A
* (A plain IndexPath can represent either a plain IndexScan or an * plain IndexPath can represent either a plain IndexScan or an
* IndexOnlyScan, but for our purposes here that distinction does not * IndexOnlyScan, but for our purposes here that distinction does not
* matter. However, some of the indexes might support only bitmap scans, * matter. However, some of the indexes might support only bitmap scans,
* and those we mustn't submit to add_path here.) * and those we mustn't submit to add_path here.)
* *
* Also, pick out the ones that are usable as bitmap scans. For that, * Also, pick out the ones that are usable as bitmap scans. For that, we
* we must discard indexes that don't support bitmap scans, and we * must discard indexes that don't support bitmap scans, and we also are
* also are only interested in paths that have some selectivity; we * only interested in paths that have some selectivity; we should discard
* should discard anything that was generated solely for ordering * anything that was generated solely for ordering purposes.
* purposes.
*/ */
foreach(lc, indexpaths) foreach(lc, indexpaths)
{ {
@ -568,9 +567,9 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
} }
/* /*
* If the index doesn't handle ScalarArrayOpExpr clauses natively, * If the index doesn't handle ScalarArrayOpExpr clauses natively, check
* check to see if there are any such clauses, and if so generate * to see if there are any such clauses, and if so generate bitmap scan
* bitmap scan paths relying on executor-managed ScalarArrayOpExpr. * paths relying on executor-managed ScalarArrayOpExpr.
*/ */
if (!index->amsearcharray) if (!index->amsearcharray)
{ {
@ -658,19 +657,19 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
/* /*
* 1. Collect the index clauses into a single list. * 1. Collect the index clauses into a single list.
* *
* We build a list of RestrictInfo nodes for clauses to be used with * We build a list of RestrictInfo nodes for clauses to be used with this
* this index, along with an integer list of the index column numbers * index, along with an integer list of the index column numbers (zero
* (zero based) that each clause should be used with. The clauses are * based) that each clause should be used with. The clauses are ordered
* ordered by index key, so that the column numbers form a nondecreasing * by index key, so that the column numbers form a nondecreasing sequence.
* sequence. (This order is depended on by btree and possibly other * (This order is depended on by btree and possibly other places.) The
* places.) The lists can be empty, if the index AM allows that. * lists can be empty, if the index AM allows that.
* *
* found_clause is set true only if there's at least one index clause; * found_clause is set true only if there's at least one index clause; and
* and if saop_control is SAOP_REQUIRE, it has to be a ScalarArrayOpExpr * if saop_control is SAOP_REQUIRE, it has to be a ScalarArrayOpExpr
* clause. * clause.
* *
* We also build a Relids set showing which outer rels are required * We also build a Relids set showing which outer rels are required by the
* by the selected clauses. * selected clauses.
*/ */
index_clauses = NIL; index_clauses = NIL;
clause_columns = NIL; clause_columns = NIL;
@ -706,8 +705,8 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
* If no clauses match the first index column, check for amoptionalkey * If no clauses match the first index column, check for amoptionalkey
* restriction. We can't generate a scan over an index with * restriction. We can't generate a scan over an index with
* amoptionalkey = false unless there's at least one index clause. * amoptionalkey = false unless there's at least one index clause.
* (When working on columns after the first, this test cannot fail. * (When working on columns after the first, this test cannot fail. It
* It is always okay for columns after the first to not have any * is always okay for columns after the first to not have any
* clauses.) * clauses.)
*/ */
if (index_clauses == NIL && !index->amoptionalkey) if (index_clauses == NIL && !index->amoptionalkey)
@ -865,8 +864,8 @@ build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
/* /*
* Ignore partial indexes that do not match the query. If a partial * Ignore partial indexes that do not match the query. If a partial
* index is marked predOK then we know it's OK. Otherwise, we have * index is marked predOK then we know it's OK. Otherwise, we have to
* to test whether the added clauses are sufficient to imply the * test whether the added clauses are sufficient to imply the
* predicate. If so, we can use the index in the current context. * predicate. If so, we can use the index in the current context.
* *
* We set useful_predicate to true iff the predicate was proven using * We set useful_predicate to true iff the predicate was proven using
@ -904,8 +903,8 @@ build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
match_clauses_to_index(index, clauses, &clauseset); match_clauses_to_index(index, clauses, &clauseset);
/* /*
* If no matches so far, and the index predicate isn't useful, * If no matches so far, and the index predicate isn't useful, we
* we don't want it. * don't want it.
*/ */
if (!clauseset.nonempty && !useful_predicate) if (!clauseset.nonempty && !useful_predicate)
continue; continue;
@ -1581,16 +1580,16 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
return false; return false;
/* /*
* Check that all needed attributes of the relation are available from * Check that all needed attributes of the relation are available from the
* the index. * index.
* *
* XXX this is overly conservative for partial indexes, since we will * XXX this is overly conservative for partial indexes, since we will
* consider attributes involved in the index predicate as required even * consider attributes involved in the index predicate as required even
* though the predicate won't need to be checked at runtime. (The same * though the predicate won't need to be checked at runtime. (The same is
* is true for attributes used only in index quals, if we are certain * true for attributes used only in index quals, if we are certain that
* that the index is not lossy.) However, it would be quite expensive * the index is not lossy.) However, it would be quite expensive to
* to determine that accurately at this point, so for now we take the * determine that accurately at this point, so for now we take the easy
* easy way out. * way out.
*/ */
/* /*
@ -2195,8 +2194,8 @@ match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys,
/* /*
* We allow any column of the index to match each pathkey; they * We allow any column of the index to match each pathkey; they
* don't have to match left-to-right as you might expect. This * don't have to match left-to-right as you might expect. This is
* is correct for GiST, which is the sole existing AM supporting * correct for GiST, which is the sole existing AM supporting
* amcanorderbyop. We might need different logic in future for * amcanorderbyop. We might need different logic in future for
* other implementations. * other implementations.
*/ */
@ -2393,8 +2392,8 @@ eclass_member_matches_indexcol(EquivalenceClass *ec, EquivalenceMember *em,
* If it's a btree index, we can reject it if its opfamily isn't * If it's a btree index, we can reject it if its opfamily isn't
* compatible with the EC, since no clause generated from the EC could be * compatible with the EC, since no clause generated from the EC could be
* used with the index. For non-btree indexes, we can't easily tell * used with the index. For non-btree indexes, we can't easily tell
* whether clauses generated from the EC could be used with the index, * whether clauses generated from the EC could be used with the index, so
* so don't check the opfamily. This might mean we return "true" for a * don't check the opfamily. This might mean we return "true" for a
* useless EC, so we have to recheck the results of * useless EC, so we have to recheck the results of
* generate_implied_equalities_for_indexcol; see * generate_implied_equalities_for_indexcol; see
* match_eclass_clauses_to_index. * match_eclass_clauses_to_index.
@ -3300,9 +3299,9 @@ adjust_rowcompare_for_index(RowCompareExpr *clause,
/* /*
* See how many of the remaining columns match some index column in the * See how many of the remaining columns match some index column in the
* same way. As in match_clause_to_indexcol(), the "other" side of * same way. As in match_clause_to_indexcol(), the "other" side of any
* any potential index condition is OK as long as it doesn't use Vars from * potential index condition is OK as long as it doesn't use Vars from the
* the indexed relation. * indexed relation.
*/ */
matching_cols = 1; matching_cols = 1;
largs_cell = lnext(list_head(clause->largs)); largs_cell = lnext(list_head(clause->largs));

View File

@ -219,8 +219,8 @@ try_nestloop_path(PlannerInfo *root,
JoinCostWorkspace workspace; JoinCostWorkspace workspace;
/* /*
* Check to see if proposed path is still parameterized, and reject if * Check to see if proposed path is still parameterized, and reject if the
* the parameterization wouldn't be sensible. * parameterization wouldn't be sensible.
*/ */
required_outer = calc_nestloop_required_outer(outer_path, required_outer = calc_nestloop_required_outer(outer_path,
inner_path); inner_path);
@ -292,8 +292,8 @@ try_mergejoin_path(PlannerInfo *root,
JoinCostWorkspace workspace;
/*
 * Check to see if proposed path is still parameterized, and reject if the
 * parameterization wouldn't be sensible.
 */
required_outer = calc_non_nestloop_required_outer(outer_path,
inner_path);
@ -371,8 +371,8 @@ try_hashjoin_path(PlannerInfo *root,
JoinCostWorkspace workspace;
/*
 * Check to see if proposed path is still parameterized, and reject if the
 * parameterization wouldn't be sensible.
 */
required_outer = calc_non_nestloop_required_outer(outer_path,
inner_path);
@ -582,8 +582,8 @@ sort_inner_and_outer(PlannerInfo *root,
 * And now we can make the path.
 *
 * Note: it's possible that the cheapest paths will already be sorted
 * properly. try_mergejoin_path will detect that case and suppress an
 * explicit sort step, so we needn't do so here.
 */
try_mergejoin_path(root,
joinrel,
@ -775,8 +775,8 @@ match_unsorted_outer(PlannerInfo *root,
/*
 * Consider nestloop joins using this outer path and various
 * available paths for the inner relation. We consider the
 * cheapest-total paths for each available parameterization of the
 * inner relation, including the unparameterized case.
 */
ListCell *lc2;
@ -847,8 +847,8 @@ match_unsorted_outer(PlannerInfo *root,
/*
 * Generate a mergejoin on the basis of sorting the cheapest inner.
 * Since a sort will be needed, only cheapest total cost matters. (But
 * try_mergejoin_path will do the right thing if inner_cheapest_total
 * is already correctly sorted.)
 */
try_mergejoin_path(root,
joinrel,
@ -873,9 +873,9 @@ match_unsorted_outer(PlannerInfo *root,
 * mergejoin using a subset of the merge clauses. Here, we consider
 * both cheap startup cost and cheap total cost.
 *
 * Currently we do not consider parameterized inner paths here. This
 * interacts with decisions elsewhere that also discriminate against
 * mergejoins with parameterized inputs; see comments in
 * src/backend/optimizer/README.
 *
 * As we shorten the sortkey list, we should consider only paths that
@ -95,8 +95,8 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
/*
 * Find potentially interesting OR joinclauses. We can use any joinclause
 * that is considered safe to move to this rel by the parameterized-path
 * machinery, even though what we are going to do with it is not exactly a
 * parameterized path.
 */
foreach(i, rel->joininfo)
{
@ -1880,8 +1880,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
Assert(rte->rtekind == RTE_RELATION);
/*
 * Sort clauses into best execution order. We do this first since the FDW
 * might have more info than we do and wish to adjust the ordering.
 */
scan_clauses = order_qual_clauses(root, scan_clauses);
@ -2523,9 +2523,9 @@ replace_nestloop_params_mutator(Node *node, PlannerInfo *root)
/*
 * If not to be replaced, just return the PlaceHolderVar unmodified.
 * We use bms_overlap as a cheap/quick test to see if the PHV might be
 * evaluated in the outer rels, and then grab its PlaceHolderInfo to
 * tell for sure.
 */
if (!bms_overlap(phv->phrels, root->curOuterRels))
return node;
@ -3690,13 +3690,12 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
{
/*
 * If we are given a sort column number to match, only consider
 * the single TLE at that position. It's possible that there is
 * no such TLE, in which case fall through and generate a resjunk
 * targetentry (we assume this must have happened in the parent
 * plan as well). If there is a TLE but it doesn't match the
 * pathkey's EC, we do the same, which is probably the wrong thing
 * but we'll leave it to caller to complain about the mismatch.
 */
tle = get_tle_by_resno(tlist, reqColIdx[numsortkeys]);
if (tle)
@ -3746,11 +3745,11 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
if (!tle)
{
/*
 * No matching tlist item; look for a computable expression. Note
 * that we treat Aggrefs as if they were variables; this is
 * necessary when attempting to sort the output from an Agg node
 * for use in a WindowFunc (since grouping_planner will have
 * treated the Aggrefs as variables, too).
 */
Expr *sortexpr = NULL;
@ -3769,7 +3768,8 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
continue;
/*
 * Ignore child members unless they match the rel being
 * sorted.
 */
if (em->em_is_child &&
!bms_equal(em->em_relids, relids))
@ -3877,8 +3877,7 @@ find_ec_member_for_tle(EquivalenceClass *ec,
/*
 * We shouldn't be trying to sort by an equivalence class that
 * contains a constant, so no need to consider such cases any further.
 */
if (em->em_is_const)
continue;
@ -192,9 +192,9 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars,
where_needed);
/*
 * If we are creating PlaceHolderInfos, mark them with the correct
 * maybe-needed locations. Otherwise, it's too late to change
 * that.
 */
if (create_new_ph)
mark_placeholder_maybe_needed(root, phinfo, where_needed);
@ -146,8 +146,8 @@ query_planner(PlannerInfo *root, List *tlist,
/*
 * Make a flattened version of the rangetable for faster access (this is
 * OK because the rangetable won't change any more), and set up an empty
 * array for indexing base relations.
 */
setup_simple_rel_arrays(root);
@ -766,9 +766,9 @@ inheritance_planner(PlannerInfo *root)
/*
 * The rowMarks list might contain references to subquery RTEs, so
 * make a copy that we can apply ChangeVarNodes to. (Fortunately, the
 * executor doesn't need to see the modified copies --- we can just
 * pass it the original rowMarks list.)
 */
subroot.rowMarks = (List *) copyObject(root->rowMarks);
@ -784,10 +784,11 @@ inheritance_planner(PlannerInfo *root)
/*
 * If this isn't the first child Query, generate duplicates of all
 * subquery RTEs, and adjust Var numbering to reference the
 * duplicates. To simplify the loop logic, we scan the original rtable
 * not the copy just made by adjust_appendrel_attrs; that should be OK
 * since subquery RTEs couldn't contain any references to the target
 * rel.
 */
if (final_rtable != NIL)
{
@ -1317,18 +1318,17 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
need_sort_for_grouping = true;
/*
 * Always override create_plan's tlist, so that we don't sort
 * useless data from a "physical" tlist.
 */
need_tlist_eval = true;
}
/*
 * create_plan returns a plan with just a "flat" tlist of required
 * Vars. Usually we need to insert the sub_tlist as the tlist of
 * the top plan node. However, we can skip that if we determined
 * that whatever create_plan chose to return will be good enough.
 */
if (need_tlist_eval)
{
@ -2653,8 +2653,8 @@ make_subplanTargetList(PlannerInfo *root,
}
/*
 * Otherwise, we must build a tlist containing all grouping columns, plus
 * any other Vars mentioned in the targetlist and HAVING qual.
 */
sub_tlist = NIL;
non_group_cols = NIL;
@ -2705,8 +2705,8 @@ make_subplanTargetList(PlannerInfo *root,
else
{
/*
 * Non-grouping column, so just remember the expression for
 * later call to pull_var_clause. There's no need for
 * pull_var_clause to examine the TargetEntry node itself.
 */
non_group_cols = lappend(non_group_cols, tle->expr);
@ -1822,8 +1822,8 @@ process_sublinks_mutator(Node *node, process_sublinks_context *context)
}
/*
 * Don't recurse into the arguments of an outer PHV or aggregate here. Any
 * SubLinks in the arguments have to be dealt with at the outer query
 * level; they'll be handled when build_subplan collects the PHV or Aggref
 * into the arguments to be passed down to the current subplan.
 */
@ -332,6 +332,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);

/*
 * Now recursively process the pulled-up quals. Any inserted
 * joins can get stacked onto either j->larg or j->rarg,
@ -357,6 +358,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);

/*
 * Now recursively process the pulled-up quals. Any inserted
 * joins can get stacked onto either j->larg or j->rarg,
@ -384,6 +386,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);

/*
 * Now recursively process the pulled-up quals. Any inserted
 * joins can get stacked onto either j->larg or j->rarg,
@ -409,6 +412,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);

/*
 * Now recursively process the pulled-up quals. Any inserted
 * joins can get stacked onto either j->larg or j->rarg,
@ -448,11 +452,12 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);

/*
 * Now recursively process the pulled-up quals. Because
 * we are underneath a NOT, we can't pull up sublinks that
 * reference the left-hand stuff, but it's still okay to
 * pull up sublinks referencing j->rarg.
 */
j->quals = pull_up_sublinks_qual_recurse(root,
j->quals,
@ -473,11 +478,12 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);

/*
 * Now recursively process the pulled-up quals. Because
 * we are underneath a NOT, we can't pull up sublinks that
 * reference the left-hand stuff, but it's still okay to
 * pull up sublinks referencing j->rarg.
 */
j->quals = pull_up_sublinks_qual_recurse(root,
j->quals,