Run pgindent on 9.2 source tree in preparation for first 9.3 commit-fest.

Bruce Momjian 2012-06-10 15:20:04 -04:00
parent 60801944fa
commit 927d61eeff
494 changed files with 7343 additions and 7046 deletions


@@ -429,8 +429,8 @@ fileGetForeignRelSize(PlannerInfo *root,
FileFdwPlanState *fdw_private;
/*
* Fetch options. We only need filename at this point, but we might
* as well get everything and not need to re-fetch it later in planning.
* Fetch options. We only need filename at this point, but we might as
* well get everything and not need to re-fetch it later in planning.
*/
fdw_private = (FileFdwPlanState *) palloc(sizeof(FileFdwPlanState));
fileGetOptions(foreigntableid,
@@ -474,7 +474,8 @@ fileGetForeignPaths(PlannerInfo *root,
/*
* If data file was sorted, and we knew it somehow, we could insert
* appropriate pathkeys into the ForeignPath node to tell the planner that.
* appropriate pathkeys into the ForeignPath node to tell the planner
* that.
*/
}
@@ -671,8 +672,8 @@ fileAnalyzeForeignTable(Relation relation,
fileGetOptions(RelationGetRelid(relation), &filename, &options);
/*
* Get size of the file. (XXX if we fail here, would it be better to
* just return false to skip analyzing the table?)
* Get size of the file. (XXX if we fail here, would it be better to just
* return false to skip analyzing the table?)
*/
if (stat(filename, &stat_buf) < 0)
ereport(ERROR,
@@ -853,8 +854,8 @@ file_acquire_sample_rows(Relation onerel, int elevel,
cstate = BeginCopyFrom(onerel, filename, NIL, options);
/*
* Use per-tuple memory context to prevent leak of memory used to read rows
* from the file with Copy routines.
* Use per-tuple memory context to prevent leak of memory used to read
* rows from the file with Copy routines.
*/
tupcontext = AllocSetContextCreate(CurrentMemoryContext,
"file_fdw temporary context",
@@ -912,8 +913,8 @@ file_acquire_sample_rows(Relation onerel, int elevel,
if (rowstoskip <= 0)
{
/*
* Found a suitable tuple, so save it, replacing one
* old tuple at random
* Found a suitable tuple, so save it, replacing one old tuple
* at random
*/
int k = (int) (targrows * anl_random_fract());
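The replacement step above is the heart of reservoir sampling: once targrows rows are saved, each later row overwrites a uniformly random slot with decreasing probability. A self-contained sketch of the simpler Algorithm R variant of the idea (fetch_row, random_fract, Row, and sample are hypothetical; the real code layers a Vitter-style row-skip on top of this):

	Row		row;				/* hypothetical row type */
	int		nseen = 0;

	while (fetch_row(&row))
	{
		nseen++;
		if (nseen <= targrows)
			sample[nseen - 1] = row;		/* fill phase */
		else if (random_fract() * nseen < targrows)
		{
			int		k = (int) (targrows * random_fract());

			sample[k] = row;				/* evict a random victim */
		}
	}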


@@ -140,8 +140,9 @@ CleanupPriorWALFiles(void)
strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0)
{
/*
* Use the original file name again now, including any extension
* that might have been chopped off before testing the sequence.
* Use the original file name again now, including any
* extension that might have been chopped off before testing
* the sequence.
*/
snprintf(WALFilePath, MAXPGPATH, "%s/%s",
archiveLocation, xlde->d_name);
@@ -298,7 +299,8 @@ main(int argc, char **argv)
dryrun = true;
break;
case 'x':
additional_ext = optarg; /* Extension to remove from xlogfile names */
additional_ext = optarg; /* Extension to remove from
* xlogfile names */
break;
default:
fprintf(stderr, "Try \"%s --help\" for more information.\n", progname);
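(An illustrative use of this option: pg_archivecleanup -x .gz /mnt/server/archiverdir 000000010000000000000010, where the .gz extension added by an archive-time compressor is stripped before file names are compared. Paths here are examples, not from this commit.)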


@@ -248,21 +248,21 @@ static uint32 pgss_hash_string(const char *str);
static void pgss_store(const char *query, uint32 queryId,
double total_time, uint64 rows,
const BufferUsage *bufusage,
pgssJumbleState * jstate);
pgssJumbleState *jstate);
static Size pgss_memsize(void);
static pgssEntry *entry_alloc(pgssHashKey *key, const char *query,
int query_len, bool sticky);
static void entry_dealloc(void);
static void entry_reset(void);
static void AppendJumble(pgssJumbleState * jstate,
static void AppendJumble(pgssJumbleState *jstate,
const unsigned char *item, Size size);
static void JumbleQuery(pgssJumbleState * jstate, Query *query);
static void JumbleRangeTable(pgssJumbleState * jstate, List *rtable);
static void JumbleExpr(pgssJumbleState * jstate, Node *node);
static void RecordConstLocation(pgssJumbleState * jstate, int location);
static char *generate_normalized_query(pgssJumbleState * jstate, const char *query,
static void JumbleQuery(pgssJumbleState *jstate, Query *query);
static void JumbleRangeTable(pgssJumbleState *jstate, List *rtable);
static void JumbleExpr(pgssJumbleState *jstate, Node *node);
static void RecordConstLocation(pgssJumbleState *jstate, int location);
static char *generate_normalized_query(pgssJumbleState *jstate, const char *query,
int *query_len_p, int encoding);
static void fill_in_constant_lengths(pgssJumbleState * jstate, const char *query);
static void fill_in_constant_lengths(pgssJumbleState *jstate, const char *query);
static int comp_location(const void *a, const void *b);
@@ -513,8 +513,8 @@ pgss_shmem_startup(void)
FreeFile(file);
/*
* Remove the file so it's not included in backups/replication
* slaves, etc. A new file will be written on next shutdown.
* Remove the file so it's not included in backups/replication slaves,
* etc. A new file will be written on next shutdown.
*/
unlink(PGSS_DUMP_FILE);
@@ -626,8 +626,8 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query)
* the statement contains an optimizable statement for which a queryId
* could be derived (such as EXPLAIN or DECLARE CURSOR). For such cases,
* runtime control will first go through ProcessUtility and then the
* executor, and we don't want the executor hooks to do anything, since
* we are already measuring the statement's costs at the utility level.
* executor, and we don't want the executor hooks to do anything, since we
* are already measuring the statement's costs at the utility level.
*/
if (query->utilityStmt)
{
@@ -789,10 +789,9 @@ pgss_ProcessUtility(Node *parsetree, const char *queryString,
DestReceiver *dest, char *completionTag)
{
/*
* If it's an EXECUTE statement, we don't track it and don't increment
* the nesting level. This allows the cycles to be charged to the
* underlying PREPARE instead (by the Executor hooks), which is much more
* useful.
* If it's an EXECUTE statement, we don't track it and don't increment the
* nesting level. This allows the cycles to be charged to the underlying
* PREPARE instead (by the Executor hooks), which is much more useful.
*
* We also don't track execution of PREPARE. If we did, we would get one
* hash table entry for the PREPARE (with hash calculated from the query
@@ -942,7 +941,7 @@ static void
pgss_store(const char *query, uint32 queryId,
double total_time, uint64 rows,
const BufferUsage *bufusage,
pgssJumbleState * jstate)
pgssJumbleState *jstate)
{
pgssHashKey key;
pgssEntry *entry;
@@ -1355,7 +1354,7 @@ entry_reset(void)
* the current jumble.
*/
static void
AppendJumble(pgssJumbleState * jstate, const unsigned char *item, Size size)
AppendJumble(pgssJumbleState *jstate, const unsigned char *item, Size size)
{
unsigned char *jumble = jstate->jumble;
Size jumble_len = jstate->jumble_len;
@@ -1404,7 +1403,7 @@ AppendJumble(pgssJumbleState * jstate, const unsigned char *item, Size size)
* of information).
*/
static void
JumbleQuery(pgssJumbleState * jstate, Query *query)
JumbleQuery(pgssJumbleState *jstate, Query *query)
{
Assert(IsA(query, Query));
Assert(query->utilityStmt == NULL);
@@ -1431,7 +1430,7 @@ JumbleQuery(pgssJumbleState * jstate, Query *query)
* Jumble a range table
*/
static void
JumbleRangeTable(pgssJumbleState * jstate, List *rtable)
JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
{
ListCell *lc;
@@ -1489,7 +1488,7 @@ JumbleRangeTable(pgssJumbleState * jstate, List *rtable)
* about any unrecognized node type.
*/
static void
JumbleExpr(pgssJumbleState * jstate, Node *node)
JumbleExpr(pgssJumbleState *jstate, Node *node)
{
ListCell *temp;
@@ -1874,7 +1873,7 @@ JumbleExpr(pgssJumbleState * jstate, Node *node)
* that is currently being walked.
*/
static void
RecordConstLocation(pgssJumbleState * jstate, int location)
RecordConstLocation(pgssJumbleState *jstate, int location)
{
/* -1 indicates unknown or undefined location */
if (location >= 0)
@@ -1909,7 +1908,7 @@ RecordConstLocation(pgssJumbleState * jstate, int location)
* Returns a palloc'd string, which is not necessarily null-terminated.
*/
static char *
generate_normalized_query(pgssJumbleState * jstate, const char *query,
generate_normalized_query(pgssJumbleState *jstate, const char *query,
int *query_len_p, int encoding)
{
char *norm_query;
@@ -2015,7 +2014,7 @@ generate_normalized_query(pgssJumbleState * jstate, const char *query,
* reason for a constant to start with a '-'.
*/
static void
fill_in_constant_lengths(pgssJumbleState * jstate, const char *query)
fill_in_constant_lengths(pgssJumbleState *jstate, const char *query)
{
pgssLocationLen *locs;
core_yyscan_t yyscanner;


@@ -77,6 +77,7 @@ static void test_sync(int writes_per_op);
static void test_open_syncs(void);
static void test_open_sync(const char *msg, int writes_size);
static void test_file_descriptor_sync(void);
#ifndef WIN32
static void process_alarm(int sig);
#else


@@ -101,10 +101,16 @@ test_timing(int32 duration)
uint64 total_time;
int64 time_elapsed = 0;
uint64 loop_count = 0;
uint64 prev, cur;
int32 diff, i, bits, found;
uint64 prev,
cur;
int32 diff,
i,
bits,
found;
instr_time start_time, end_time, temp;
instr_time start_time,
end_time,
temp;
static int64 histogram[32];
char buf[100];
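The declarations above feed a tight clock-reading loop. A minimal sketch of how such a loop is typically written with PostgreSQL's portable timer macros from portability/instr_time.h (simplified; not the tool's exact code):

	instr_time	start,
				now;
	int64		usec;

	INSTR_TIME_SET_CURRENT(start);
	/* ... spin, repeatedly sampling the clock ... */
	INSTR_TIME_SET_CURRENT(now);
	INSTR_TIME_SUBTRACT(now, start);	/* now -= start */
	usec = INSTR_TIME_GET_MICROSEC(now);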


@@ -199,9 +199,9 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
* trigram extraction is relatively CPU-expensive. We must include
* strategy number because trigram extraction depends on strategy.
*
* The cached structure contains the strategy number, then the input
* query (starting at a MAXALIGN boundary), then the TRGM value (also
* starting at a MAXALIGN boundary).
* The cached structure contains the strategy number, then the input query
* (starting at a MAXALIGN boundary), then the TRGM value (also starting
* at a MAXALIGN boundary).
*/
if (cache == NULL ||
strategy != *((StrategyNumber *) cache) ||
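The layout that comment describes can be written down directly. A sketch of how the three regions pack into one palloc'd blob using the standard MAXALIGN macro (querysize, trgmsize, query, trgm, and strategy are hypothetical inputs here):

	Size		qoff = MAXALIGN(sizeof(StrategyNumber));
	Size		toff = MAXALIGN(qoff + querysize);
	char	   *cache = palloc(toff + trgmsize);

	*((StrategyNumber *) cache) = strategy;	/* strategy number first */
	memcpy(cache + qoff, query, querysize);	/* then the query, aligned */
	memcpy(cache + toff, trgm, trgmsize);	/* then the TRGM, aligned */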
@@ -341,8 +341,7 @@ gtrgm_distance(PG_FUNCTION_ARGS)
char *cache = (char *) fcinfo->flinfo->fn_extra;
/*
* Cache the generated trigrams across multiple calls with the same
* query.
* Cache the generated trigrams across multiple calls with the same query.
*/
if (cache == NULL ||
VARSIZE(cache) != querysize ||


@@ -238,7 +238,8 @@ check_cluster_versions(void)
/*
* We can't allow downgrading because we use the target pg_dumpall, and
* pg_dumpall cannot operate on new database versions, only older versions.
* pg_dumpall cannot operate on new database versions, only older
* versions.
*/
if (old_cluster.major_version > new_cluster.major_version)
pg_log(PG_FATAL, "This utility cannot be used to downgrade to older major PostgreSQL versions.\n");
@@ -764,9 +765,9 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
PGconn *conn = connectToServer(cluster, active_db->db_name);
/*
* While several relkinds don't store any data, e.g. views, they
* can be used to define data types of other columns, so we
* check all relkinds.
* While several relkinds don't store any data, e.g. views, they can
* be used to define data types of other columns, so we check all
* relkinds.
*/
res = executeQueryOrDie(conn,
"SELECT n.nspname, c.relname, a.attname "
@@ -836,9 +837,11 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
static void
get_bin_version(ClusterInfo *cluster)
{
char cmd[MAXPGPATH], cmd_output[MAX_STRING];
char cmd[MAXPGPATH],
cmd_output[MAX_STRING];
FILE *output;
int pre_dot, post_dot;
int pre_dot,
post_dot;
snprintf(cmd, sizeof(cmd), "\"%s/pg_ctl\" --version", cluster->bindir);
@@ -858,4 +861,3 @@ get_bin_version(ClusterInfo *cluster)
cluster->bin_version = (pre_dot * 100 + post_dot) * 100;
}
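A quick worked example of the packing on the line above: if pg_ctl --version reports 9.2, then pre_dot = 9 and post_dot = 2, giving

	cluster->bin_version = (9 * 100 + 2) * 100;		/* = 90200 */

which is PostgreSQL's usual numeric version encoding.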


@@ -129,6 +129,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
pg_log(PG_VERBOSE, "%s", bufin);
#ifdef WIN32
/*
* Due to an installer bug, LANG=C doesn't work for PG 8.3.3, but does
* work 8.2.6 and 8.3.7, so check for non-ASCII output and suggest a


@@ -18,6 +18,7 @@
static void check_data_dir(const char *pg_data);
static void check_bin_dir(ClusterInfo *cluster);
static void validate_exec(const char *dir, const char *cmdName);
#ifdef WIN32
static int win32_check_directory_write_permissions(void);
#endif


@@ -233,7 +233,7 @@ copy_file(const char *srcfile, const char *dstfile, bool force)
* large number of times.
*/
int
load_directory(const char *dirname, struct dirent ***namelist)
load_directory(const char *dirname, struct dirent *** namelist)
{
DIR *dirdesc;
struct dirent *direntry;
@@ -314,7 +314,6 @@ win32_pghardlink(const char *src, const char *dst)
else
return 0;
}
#endif
@@ -330,5 +329,3 @@ fopen_priv(const char *path, const char *mode)
return fp;
}


@@ -144,8 +144,8 @@ get_loadable_libraries(void)
PGconn *conn = connectToServer(&old_cluster, active_db->db_name);
/*
* Fetch all libraries referenced in this DB. We can't exclude
* the "pg_catalog" schema because, while such functions are not
* Fetch all libraries referenced in this DB. We can't exclude the
* "pg_catalog" schema because, while such functions are not
* explicitly dumped by pg_dump, they do reference implicit objects
* that pg_dump does dump, e.g. CREATE LANGUAGE plperl.
*/
@@ -164,8 +164,8 @@ get_loadable_libraries(void)
* pg_dumpall to dump it. However that function still references
* "plpython" (no "2"), so it throws an error on restore. This code
* checks for the problem function, reports affected databases to the
* user and explains how to remove them.
* 8.1 git commit: e0dedd0559f005d60c69c9772163e69c204bac69
* user and explains how to remove them. 8.1 git commit:
* e0dedd0559f005d60c69c9772163e69c204bac69
* http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php
* http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php
*/
@@ -294,16 +294,16 @@ check_loadable_libraries(void)
/*
* In Postgres 9.0, Python 3 support was added, and to do that, a
* plpython2u language was created with library name plpython2.so
* as a symbolic link to plpython.so. In Postgres 9.1, only the
* plpython2.so library was created, and both plpythonu and
* plpython2u pointing to it. For this reason, any reference to
* library name "plpython" in an old PG <= 9.1 cluster must look
* for "plpython2" in the new cluster.
* plpython2u language was created with library name plpython2.so as a
* symbolic link to plpython.so. In Postgres 9.1, only the
* plpython2.so library was created, and both plpythonu and plpython2u
* pointing to it. For this reason, any reference to library name
* "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in
* the new cluster.
*
* For this case, we could check pg_pltemplate, but that only works
* for languages, and does not help with function shared objects,
* so we just do a general fix.
* for languages, and does not help with function shared objects, so
* we just do a general fix.
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
strcmp(lib, "$libdir/plpython") == 0)


@@ -57,12 +57,12 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
old_db->db_name, old_rel->reloid, new_rel->reloid);
/*
* TOAST table names initially match the heap pg_class oid.
* In pre-8.4, TOAST table names change during CLUSTER; in pre-9.0,
* TOAST table names change during ALTER TABLE ALTER COLUMN SET TYPE.
* In >= 9.0, TOAST relation names always use heap table oids, hence
* we cannot check relation names when upgrading from pre-9.0.
* Clusters upgraded to 9.0 will get matching TOAST names.
* TOAST table names initially match the heap pg_class oid. In
* pre-8.4, TOAST table names change during CLUSTER; in pre-9.0, TOAST
* table names change during ALTER TABLE ALTER COLUMN SET TYPE. In >=
* 9.0, TOAST relation names always use heap table oids, hence we
* cannot check relation names when upgrading from pre-9.0. Clusters
* upgraded to 9.0 will get matching TOAST names.
*/
if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 ||


@@ -139,9 +139,9 @@ parseCommandLine(int argc, char *argv[])
break;
/*
* Someday, the port number option could be removed and
* passed using -o/-O, but that requires postmaster -C
* to be supported on all old/new versions.
* Someday, the port number option could be removed and passed
* using -o/-O, but that requires postmaster -C to be
* supported on all old/new versions.
*/
case 'p':
if ((old_cluster.port = atoi(optarg)) <= 0)
@@ -322,8 +322,10 @@ void
adjust_data_dir(ClusterInfo *cluster)
{
char filename[MAXPGPATH];
char cmd[MAXPGPATH], cmd_output[MAX_STRING];
FILE *fp, *output;
char cmd[MAXPGPATH],
cmd_output[MAX_STRING];
FILE *fp,
*output;
/* If there is no postgresql.conf, it can't be a config-only dir */
snprintf(filename, sizeof(filename), "%s/postgresql.conf", cluster->pgconfig);
@@ -345,10 +347,9 @@ adjust_data_dir(ClusterInfo *cluster)
CLUSTER_NAME(cluster));
/*
* We don't have a data directory yet, so we can't check the PG
* version, so this might fail --- only works for PG 9.2+. If this
* fails, pg_upgrade will fail anyway because the data files will not
* be found.
* We don't have a data directory yet, so we can't check the PG version,
* so this might fail --- only works for PG 9.2+. If this fails,
* pg_upgrade will fail anyway because the data files will not be found.
*/
snprintf(cmd, sizeof(cmd), "\"%s/postmaster\" -D \"%s\" -C data_directory",
cluster->bindir, cluster->pgconfig);
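(For reference, the -C flag used in the command assembled above makes the server print a single configuration setting and exit; the resulting invocation looks something like "/usr/pgsql/bin/postmaster" -D /etc/postgresql -C data_directory, with illustrative paths, and its output is the real data directory.)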


@@ -122,11 +122,10 @@ main(int argc, char **argv)
stop_postmaster(false);
/*
* Most failures happen in create_new_objects(), which has
* completed at this point. We do this here because it is just
* before linking, which will link the old and new cluster data
* files, preventing the old cluster from being safely started
* once the new cluster is started.
* Most failures happen in create_new_objects(), which has completed at
* this point. We do this here because it is just before linking, which
* will link the old and new cluster data files, preventing the old
* cluster from being safely started once the new cluster is started.
*/
if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
disable_old_cluster();


@@ -222,9 +222,11 @@ typedef struct
ControlData controldata; /* pg_control information */
DbInfoArr dbarr; /* dbinfos array */
char *pgdata; /* pathname for cluster's $PGDATA directory */
char *pgconfig; /* pathname for cluster's config file directory */
char *pgconfig; /* pathname for cluster's config file
* directory */
char *bindir; /* pathname for cluster's executable directory */
char *pgopts; /* options to pass to the server, like pg_ctl -o */
char *pgopts; /* options to pass to the server, like pg_ctl
* -o */
unsigned short port; /* port number where postmaster is waiting */
uint32 major_version; /* PG_VERSION of cluster */
char major_version_str[64]; /* string PG_VERSION of cluster */
@@ -314,9 +316,10 @@ void split_old_dump(void);
/* exec.c */
int exec_prog(bool throw_error, bool is_priv,
const char *log_file, const char *cmd, ...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5)));
int
exec_prog(bool throw_error, bool is_priv,
const char *log_file, const char *cmd,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5)));
void verify_directories(void);
bool is_server_running(const char *datadir);
@@ -353,7 +356,7 @@ const char *setupPageConverter(pageCnvCtx **result);
typedef void *pageCnvCtx;
#endif
int load_directory(const char *dirname, struct dirent ***namelist);
int load_directory(const char *dirname, struct dirent *** namelist);
const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
const char *dst, bool force);
const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
@@ -399,8 +402,9 @@ void init_tablespaces(void);
/* server.c */
PGconn *connectToServer(ClusterInfo *cluster, const char *db_name);
PGresult *executeQueryOrDie(PGconn *conn, const char *fmt, ...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
PGresult *
executeQueryOrDie(PGconn *conn, const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void start_postmaster(ClusterInfo *cluster);
void stop_postmaster(bool fast);
@@ -413,12 +417,15 @@ void check_pghost_envvar(void);
char *quote_identifier(const char *s);
int get_user_info(char **user_name);
void check_ok(void);
void report_status(eLogType type, const char *fmt, ...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void pg_log(eLogType type, char *fmt, ...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void prep_status(const char *fmt, ...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
void
report_status(eLogType type, const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void
pg_log(eLogType type, char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void
prep_status(const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
void check_ok(void);
char *pg_strdup(const char *s);
void *pg_malloc(int size);


@@ -34,7 +34,8 @@ const char *
transfer_all_new_dbs(DbInfoArr *old_db_arr,
DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata)
{
int old_dbnum, new_dbnum;
int old_dbnum,
new_dbnum;
const char *msg = NULL;
prep_status("%s user relation files\n",
@@ -45,15 +46,16 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
old_dbnum < old_db_arr->ndbs;
old_dbnum++, new_dbnum++)
{
DbInfo *old_db = &old_db_arr->dbs[old_dbnum], *new_db = NULL;
DbInfo *old_db = &old_db_arr->dbs[old_dbnum],
*new_db = NULL;
FileNameMap *mappings;
int n_maps;
pageCnvCtx *pageConverter = NULL;
/*
* Advance past any databases that exist in the new cluster
* but not in the old, e.g. "postgres". (The user might
* have removed the 'postgres' database from the old cluster.)
* Advance past any databases that exist in the new cluster but not in
* the old, e.g. "postgres". (The user might have removed the
* 'postgres' database from the old cluster.)
*/
for (; new_dbnum < new_db_arr->ndbs; new_dbnum++)
{
@@ -83,8 +85,8 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
}
}
prep_status(" "); /* in case nothing printed; pass a space so gcc
* doesn't complain about empty format
prep_status(" "); /* in case nothing printed; pass a space so
* gcc doesn't complain about empty format
* string */
check_ok();


@@ -66,7 +66,7 @@
typedef struct win32_pthread *pthread_t;
typedef int pthread_attr_t;
static int pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg);
static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
static int pthread_join(pthread_t th, void **thread_return);
#elif defined(ENABLE_THREAD_SAFETY)
/* Use platform-dependent pthread capability */
@@ -84,7 +84,7 @@ static int pthread_join(pthread_t th, void **thread_return);
typedef struct fork_pthread *pthread_t;
typedef int pthread_attr_t;
static int pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg);
static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
static int pthread_join(pthread_t th, void **thread_return);
#endif
@@ -1267,7 +1267,8 @@ init(void)
* versions. Since pgbench has never pretended to be fully TPC-B
* compliant anyway, we stick with the historical behavior.
*/
struct ddlinfo {
struct ddlinfo
{
char *table;
char *cols;
int declare_fillfactor;
@@ -1321,14 +1322,15 @@ init(void)
/* Construct new create table statement. */
opts[0] = '\0';
if (ddl->declare_fillfactor)
snprintf(opts+strlen(opts), 256-strlen(opts),
snprintf(opts + strlen(opts), 256 - strlen(opts),
" with (fillfactor=%d)", fillfactor);
if (tablespace != NULL)
{
char *escape_tablespace;
escape_tablespace = PQescapeIdentifier(con, tablespace,
strlen(tablespace));
snprintf(opts+strlen(opts), 256-strlen(opts),
snprintf(opts + strlen(opts), 256 - strlen(opts),
" tablespace %s", escape_tablespace);
PQfreemem(escape_tablespace);
}
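The snprintf(opts + strlen(opts), 256 - strlen(opts), ...) calls that pgindent is respacing here use the append-into-a-fixed-buffer idiom: each call writes at the current end of the string and bounds itself by the space actually left. A standalone illustration (hypothetical buffer and values, not pgbench's):

	char		opts[256] = "";

	snprintf(opts + strlen(opts), sizeof(opts) - strlen(opts),
			 " with (fillfactor=%d)", 90);
	snprintf(opts + strlen(opts), sizeof(opts) - strlen(opts),
			 " tablespace %s", "ts1");
	/* opts is now " with (fillfactor=90) tablespace ts1" */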
@@ -1411,9 +1413,10 @@ init(void)
if (index_tablespace != NULL)
{
char *escape_tablespace;
escape_tablespace = PQescapeIdentifier(con, index_tablespace,
strlen(index_tablespace));
snprintf(buffer+strlen(buffer), 256-strlen(buffer),
snprintf(buffer + strlen(buffer), 256 - strlen(buffer),
" using index tablespace %s", escape_tablespace);
PQfreemem(escape_tablespace);
}
@@ -2571,7 +2574,7 @@ typedef struct fork_pthread
static int
pthread_create(pthread_t *thread,
pthread_attr_t * attr,
pthread_attr_t *attr,
void *(*start_routine) (void *),
void *arg)
{
@@ -2687,7 +2690,7 @@ win32_pthread_run(void *arg)
static int
pthread_create(pthread_t *thread,
pthread_attr_t * attr,
pthread_attr_t *attr,
void *(*start_routine) (void *),
void *arg)
{


@@ -34,8 +34,8 @@ char *
px_crypt_md5(const char *pw, const char *salt, char *passwd, unsigned dstlen)
{
static char *magic = "$1$"; /* This string is magic for this algorithm.
* Having it this way, we can get better
* later on */
* Having it this way, we can get better later
* on */
static char *p;
static const char *sp,
*ep;


@@ -204,8 +204,9 @@ const char *px_resolve_alias(const PX_Alias *aliases, const char *name);
void px_set_debug_handler(void (*handler) (const char *));
#ifdef PX_DEBUG
void px_debug(const char *fmt, ...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
void
px_debug(const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
#else
#define px_debug(...)
#endif


@@ -42,9 +42,9 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
Form_pg_database datForm;
/*
* Oid of the source database is not saved in pg_database catalog,
* so we collect its identifier using contextual information.
* If NULL, its default is "template1" according to createdb().
* Oid of the source database is not saved in pg_database catalog, so we
* collect its identifier using contextual information. If NULL, its
* default is "template1" according to createdb().
*/
if (!dtemplate)
dtemplate = "template1";
@@ -56,6 +56,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
tcontext = sepgsql_get_label(object.classId,
object.objectId,
object.objectSubId);
/*
* check db_database:{getattr} permission
*/
@@ -67,11 +68,11 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
true);
/*
* Compute a default security label of the newly created database
* based on a pair of security label of client and source database.
* Compute a default security label of the newly created database based on
* a pair of security label of client and source database.
*
* XXX - uncoming version of libselinux supports to take object
* name to handle special treatment on default security label.
* XXX - uncoming version of libselinux supports to take object name to
* handle special treatment on default security label.
*/
rel = heap_open(DatabaseRelationId, AccessShareLock);
@@ -91,6 +92,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
ncontext = sepgsql_compute_create(sepgsql_get_client_label(),
tcontext,
SEPG_CLASS_DB_DATABASE);
/*
* check db_database:{create} permission
*/
@@ -170,6 +172,7 @@ sepgsql_database_relabel(Oid databaseId, const char *seclabel)
SEPG_DB_DATABASE__RELABELFROM,
audit_name,
true);
/*
* check db_database:{relabelto} permission
*/


@@ -115,9 +115,8 @@ sepgsql_object_access(ObjectAccessType access,
* All cases we want to apply permission checks on
* creation of a new relation are invocation of the
* heap_create_with_catalog via DefineRelation or
* OpenIntoRel.
* Elsewhere, we need neither assignment of security
* label nor permission checks.
* OpenIntoRel. Elsewhere, we need neither assignment
* of security label nor permission checks.
*/
switch (sepgsql_context_info.cmdtype)
{
@@ -150,12 +149,12 @@
case OAT_DROP:
{
ObjectAccessDrop *drop_arg = (ObjectAccessDrop *)arg;
ObjectAccessDrop *drop_arg = (ObjectAccessDrop *) arg;
/*
* No need to apply permission checks on object deletion
* due to internal cleanups; such as removal of temporary
* database object on session closed.
* No need to apply permission checks on object deletion due
* to internal cleanups; such as removal of temporary database
* object on session closed.
*/
if ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL) != 0)
break;
@@ -277,19 +276,20 @@ sepgsql_utility_command(Node *parsetree,
{
/*
* Check command tag to avoid nefarious operations, and save the
* current contextual information to determine whether we should
* apply permission checks here, or not.
* current contextual information to determine whether we should apply
* permission checks here, or not.
*/
sepgsql_context_info.cmdtype = nodeTag(parsetree);
switch (nodeTag(parsetree))
{
case T_CreatedbStmt:
/*
* We hope to reference name of the source database, but it
* does not appear in system catalog. So, we save it here.
*/
foreach (cell, ((CreatedbStmt *) parsetree)->options)
foreach(cell, ((CreatedbStmt *) parsetree)->options)
{
DefElem *defel = (DefElem *) lfirst(cell);
@@ -303,6 +303,7 @@
break;
case T_LoadStmt:
/*
* We reject LOAD command across the board on enforcing mode,
* because a binary module can arbitrarily override hooks.
@ -315,6 +316,7 @@ sepgsql_utility_command(Node *parsetree,
}
break;
default:
/*
* Right now we don't check any other utility commands,
* because it needs more detailed information to make access


@@ -65,7 +65,8 @@ static char *client_label_committed = NULL; /* set by sepgsql_setcon(),
* and already committed */
static char *client_label_func = NULL; /* set by trusted procedure */
typedef struct {
typedef struct
{
SubTransactionId subid;
char *label;
} pending_label;
@@ -140,9 +141,10 @@ sepgsql_set_client_label(const char *new_label)
SEPG_PROCESS__DYNTRANSITION,
NULL,
true);
/*
* Append the supplied new_label on the pending list until
* the current transaction is committed.
* Append the supplied new_label on the pending list until the current
* transaction is committed.
*/
oldcxt = MemoryContextSwitchTo(CurTransactionContext);
@@ -181,10 +183,11 @@ sepgsql_xact_callback(XactEvent event, void *arg)
pfree(client_label_committed);
client_label_committed = new_label;
/*
* XXX - Note that items of client_label_pending are allocated
* on CurTransactionContext, thus, all acquired memory region
* shall be released implicitly.
* XXX - Note that items of client_label_pending are allocated on
* CurTransactionContext, thus, all acquired memory region shall
* be released implicitly.
*/
client_label_pending = NIL;
}
@@ -213,6 +216,7 @@ sepgsql_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
for (cell = list_head(client_label_pending); cell; cell = next)
{
pending_label *plabel = lfirst(cell);
next = lnext(cell);
if (plabel->subid == mySubid)
@@ -340,8 +344,8 @@ sepgsql_fmgr_hook(FmgrHookEventType event,
/*
* process:transition permission between old and new label,
* when user tries to switch security label of the client
* on execution of trusted procedure.
* when user tries to switch security label of the client on
* execution of trusted procedure.
*/
if (stack->new_label)
sepgsql_avc_check_perms_label(stack->new_label,


@@ -77,6 +77,7 @@ sepgsql_proc_post_create(Oid functionId)
SEPG_DB_SCHEMA__ADD_NAME,
getObjectDescription(&object),
true);
/*
* XXX - db_language:{implement} also should be checked here
*/
@@ -97,9 +98,10 @@ sepgsql_proc_post_create(Oid functionId)
*/
initStringInfo(&audit_name);
appendStringInfo(&audit_name, "function %s(", NameStr(proForm->proname));
for (i=0; i < proForm->pronargs; i++)
for (i = 0; i < proForm->pronargs; i++)
{
Oid typeoid = proForm->proargtypes.values[i];
if (i > 0)
appendStringInfoChar(&audit_name, ',');
appendStringInfoString(&audit_name, format_type_be(typeoid));
@@ -111,6 +113,7 @@ sepgsql_proc_post_create(Oid functionId)
SEPG_DB_PROCEDURE__CREATE,
audit_name.data,
true);
/*
* Assign the default security label on a new procedure
*/
@@ -198,6 +201,7 @@ sepgsql_proc_relabel(Oid functionId, const char *seclabel)
SEPG_DB_PROCEDURE__RELABELFROM,
audit_name,
true);
/*
* check db_procedure:{relabelto} permission
*/


@@ -44,7 +44,7 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
char *scontext;
char *tcontext;
char *ncontext;
char audit_name[2*NAMEDATALEN + 20];
char audit_name[2 * NAMEDATALEN + 20];
ObjectAddress object;
Form_pg_attribute attForm;
@@ -84,6 +84,7 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
tcontext = sepgsql_get_label(RelationRelationId, relOid, 0);
ncontext = sepgsql_compute_create(scontext, tcontext,
SEPG_CLASS_DB_COLUMN);
/*
* check db_column:{create} permission
*/
@@ -172,6 +173,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
SEPG_DB_COLUMN__RELABELFROM,
audit_name,
true);
/*
* check db_column:{relabelto} permission
*/
@@ -203,7 +205,7 @@ sepgsql_relation_post_create(Oid relOid)
char *tcontext; /* schema */
char *rcontext; /* relation */
char *ccontext; /* column */
char audit_name[2*NAMEDATALEN + 20];
char audit_name[2 * NAMEDATALEN + 20];
/*
* Fetch catalog record of the new relation. Because pg_class entry is not
@@ -254,6 +256,7 @@ sepgsql_relation_post_create(Oid relOid)
SEPG_DB_SCHEMA__ADD_NAME,
getObjectDescription(&object),
true);
/*
* Compute a default security label when we create a new relation object
* under the specified namespace.
@@ -273,6 +276,7 @@ sepgsql_relation_post_create(Oid relOid)
SEPG_DB_DATABASE__CREATE,
audit_name,
true);
/*
* Assign the default security label on the new relation
*/
@@ -315,6 +319,7 @@ sepgsql_relation_post_create(Oid relOid)
ccontext = sepgsql_compute_create(scontext,
rcontext,
SEPG_CLASS_DB_COLUMN);
/*
* check db_column:{create} permission
*/
@@ -404,7 +409,7 @@ sepgsql_relation_drop(Oid relOid)
int i;
attrList = SearchSysCacheList1(ATTNUM, ObjectIdGetDatum(relOid));
for (i=0; i < attrList->n_members; i++)
for (i = 0; i < attrList->n_members; i++)
{
atttup = &attrList->members[i]->tuple;
attForm = (Form_pg_attribute) GETSTRUCT(atttup);
@@ -468,6 +473,7 @@ sepgsql_relation_relabel(Oid relOid, const char *seclabel)
SEPG_DB_TABLE__RELABELFROM,
audit_name,
true);
/*
* check db_xxx:{relabelto} permission
*/


@@ -48,9 +48,9 @@ sepgsql_schema_post_create(Oid namespaceId)
* Compute a default security label when we create a new schema object
* under the working database.
*
* XXX - uncoming version of libselinux supports to take object
* name to handle special treatment on default security label;
* such as special label on "pg_temp" schema.
* XXX - uncoming version of libselinux supports to take object name to
* handle special treatment on default security label; such as special
* label on "pg_temp" schema.
*/
rel = heap_open(NamespaceRelationId, AccessShareLock);
@@ -71,6 +71,7 @@ sepgsql_schema_post_create(Oid namespaceId)
ncontext = sepgsql_compute_create(sepgsql_get_client_label(),
tcontext,
SEPG_CLASS_DB_SCHEMA);
/*
* check db_schema:{create}
*/
@@ -149,6 +150,7 @@ sepgsql_schema_relabel(Oid namespaceId, const char *seclabel)
SEPG_DB_SCHEMA__RELABELFROM,
audit_name,
true);
/*
* check db_schema:{relabelto} permission
*/


@@ -248,6 +248,7 @@ extern bool sepgsql_check_perms(const char *scontext,
uint32 required,
const char *audit_name,
bool abort);
/*
* uavc.c
*/


@@ -67,8 +67,8 @@ static char *avc_unlabeled; /* system 'unlabeled' label */
static uint32
sepgsql_avc_hash(const char *scontext, const char *tcontext, uint16 tclass)
{
return hash_any((const unsigned char *)scontext, strlen(scontext))
^ hash_any((const unsigned char *)tcontext, strlen(tcontext))
return hash_any((const unsigned char *) scontext, strlen(scontext))
^ hash_any((const unsigned char *) tcontext, strlen(tcontext))
^ tclass;
}
@@ -220,12 +220,12 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
index = hash % AVC_NUM_SLOTS;
/*
* Validation check of the supplied security context.
* Because it always invoke system-call, frequent check should be avoided.
* Unless security policy is reloaded, validation status shall be kept, so
* we also cache whether the supplied security context was valid, or not.
* Validation check of the supplied security context. Because it always
* invoke system-call, frequent check should be avoided. Unless security
* policy is reloaded, validation status shall be kept, so we also cache
* whether the supplied security context was valid, or not.
*/
if (security_check_context_raw((security_context_t)tcontext) != 0)
if (security_check_context_raw((security_context_t) tcontext) != 0)
ucontext = sepgsql_avc_unlabeled();
/*
@@ -237,15 +237,14 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
sepgsql_compute_avd(scontext, ucontext, tclass, &avd);
/*
* It also caches a security label to be switched when a client
* labeled as 'scontext' executes a procedure labeled as 'tcontext',
* not only access control decision on the procedure.
* The security label to be switched shall be computed uniquely on
* a pair of 'scontext' and 'tcontext', thus, it is reasonable to
* cache the new label on avc, and enables to reduce unnecessary
* system calls.
* It shall be referenced at sepgsql_needs_fmgr_hook to check whether
* the supplied function is a trusted procedure, or not.
* It also caches a security label to be switched when a client labeled as
* 'scontext' executes a procedure labeled as 'tcontext', not only access
* control decision on the procedure. The security label to be switched
* shall be computed uniquely on a pair of 'scontext' and 'tcontext',
* thus, it is reasonable to cache the new label on avc, and enables to
* reduce unnecessary system calls. It shall be referenced at
* sepgsql_needs_fmgr_hook to check whether the supplied function is a
* trusted procedure, or not.
*/
if (tclass == SEPG_CLASS_DB_PROCEDURE)
{
@@ -314,7 +313,7 @@ sepgsql_avc_lookup(const char *scontext, const char *tcontext, uint16 tclass)
hash = sepgsql_avc_hash(scontext, tcontext, tclass);
index = hash % AVC_NUM_SLOTS;
foreach (cell, avc_slots[index])
foreach(cell, avc_slots[index])
{
cache = lfirst(cell);
@@ -355,7 +354,8 @@ sepgsql_avc_check_perms_label(const char *tcontext,
bool result;
sepgsql_avc_check_valid();
do {
do
{
result = true;
/*
@@ -384,9 +384,9 @@
/*
* In permissive mode or permissive domain, violated permissions
* shall be audited to the log files at once, and then implicitly
* allowed to avoid a flood of access denied logs, because
* the purpose of permissive mode/domain is to collect a violation
* log that will make it possible to fix up the security policy.
* allowed to avoid a flood of access denied logs, because the
* purpose of permissive mode/domain is to collect a violation log
* that will make it possible to fix up the security policy.
*/
if (!sepgsql_getenforce() || cache->permissive)
cache->allowed |= required;
@@ -397,10 +397,10 @@ sepgsql_avc_check_perms_label(const char *tcontext,
/*
* In the case when we have something auditable actions here,
* sepgsql_audit_log shall be called with text representation of
* security labels for both of subject and object.
* It records this access violation, so DBA will be able to find
* out unexpected security problems later.
* sepgsql_audit_log shall be called with text representation of security
* labels for both of subject and object. It records this access
* violation, so DBA will be able to find out unexpected security problems
* later.
*/
if (audited != 0 &&
audit_name != SEPGSQL_AVC_NOAUDIT &&
@@ -461,7 +461,8 @@ sepgsql_avc_trusted_proc(Oid functionId)
tcontext = GetSecurityLabel(&tobject, SEPGSQL_LABEL_TAG);
sepgsql_avc_check_valid();
do {
do
{
if (tcontext)
cache = sepgsql_avc_lookup(scontext, tcontext,
SEPG_CLASS_DB_PROCEDURE);
@@ -508,12 +509,11 @@ sepgsql_avc_init(void)
avc_threshold = AVC_DEF_THRESHOLD;
/*
* SELinux allows to mmap(2) its kernel status page in read-only mode
* to inform userspace applications its status updating (such as
* policy reloading) without system-call invocations.
* This feature is only supported in Linux-2.6.38 or later, however,
* libselinux provides a fallback mode to know its status using
* netlink sockets.
* SELinux allows to mmap(2) its kernel status page in read-only mode to
* inform userspace applications its status updating (such as policy
* reloading) without system-call invocations. This feature is only
* supported in Linux-2.6.38 or later, however, libselinux provides a
* fallback mode to know its status using netlink sockets.
*/
rc = selinux_status_open(1);
if (rc < 0)


@@ -536,8 +536,7 @@ check_foreign_key(PG_FUNCTION_ARGS)
/*
* Remember that SPI_prepare places plan in current memory context
* - so, we have to save plan in Top memory context for later
* use.
* - so, we have to save plan in Top memory context for later use.
*/
if (SPI_keepplan(pplan))
/* internal error */
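SPI_keepplan is what this comment is about: SPI_prepare allocates its plan in a context that goes away when the SPI call finishes, so the plan must be moved to long-lived storage before it can be cached. A minimal sketch of the prepare-and-keep pattern (the query text and argument types are hypothetical):

	Oid			argtypes[1] = {INT4OID};
	SPIPlanPtr	pplan;

	pplan = SPI_prepare("SELECT 1 FROM pktable WHERE id = $1", 1, argtypes);
	if (pplan == NULL)
		elog(ERROR, "SPI_prepare failed");
	if (SPI_keepplan(pplan) != 0)	/* reparent plan for long-term use */
		elog(ERROR, "SPI_keepplan failed");
	/* pplan can now be stashed (e.g. in a static) and reused
	 * via SPI_execute_plan() on later calls */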


@@ -261,8 +261,8 @@ vacuumlo(const char *database, const struct _param * param)
* We don't want to run each delete as an individual transaction, because
* the commit overhead would be high. However, since 9.0 the backend will
* acquire a lock per deleted LO, so deleting too many LOs per transaction
* risks running out of room in the shared-memory lock table.
* Accordingly, we delete up to transaction_limit LOs per transaction.
* risks running out of room in the shared-memory lock table. Accordingly,
* we delete up to transaction_limit LOs per transaction.
*/
res = PQexec(conn, "begin");
if (PQresultStatus(res) != PGRES_COMMAND_OK)
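(The cap is exposed as vacuumlo's -l option; an illustrative invocation such as vacuumlo -l 1000 mydb deletes at most 1000 orphaned large objects per transaction.)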


@@ -718,9 +718,9 @@ xpath_table(PG_FUNCTION_ARGS)
xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2);
/*
* Clear the values array, so that not-well-formed documents return
* NULL in all columns. Note that this also means that spare columns
* will be NULL.
* Clear the values array, so that not-well-formed documents
* return NULL in all columns. Note that this also means that
* spare columns will be NULL.
*/
for (j = 0; j < ret_tupdesc->natts; j++)
values[j] = NULL;


@@ -781,8 +781,8 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
{
/*
* Page was split while we looked elsewhere. We didn't see the
* downlink to the right page when we scanned the parent, so
* add it to the queue now.
* downlink to the right page when we scanned the parent, so add
* it to the queue now.
*
* Put the right page ahead of the queue, so that we visit it
* next. That's important, because if this is the lowest internal


@@ -716,8 +716,8 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
/*
* All the downlinks on the old root page are now on one of the child
* pages. Visit all the new child pages to memorize the parents of
* the grandchildren.
* pages. Visit all the new child pages to memorize the parents of the
* grandchildren.
*/
if (gfbb->rootlevel > 1)
{
@@ -734,8 +734,8 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
UnlockReleaseBuffer(childbuf);
/*
* Also remember that the parent of the new child page is
* the root block.
* Also remember that the parent of the new child page is the
* root block.
*/
gistMemorizeParent(buildstate, childblkno, GIST_ROOT_BLKNO);
}
@@ -789,8 +789,8 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
* Remember the parent of each new child page in our parent map.
* This assumes that the downlinks fit on the parent page. If the
* parent page is split, too, when we recurse up to insert the
* downlinks, the recursive gistbufferinginserttuples() call
* will update the map again.
* downlinks, the recursive gistbufferinginserttuples() call will
* update the map again.
*/
if (level > 0)
gistMemorizeParent(buildstate,
@@ -881,6 +881,7 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate,
{
ItemId iid = PageGetItemId(page, *downlinkoffnum);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == childblkno)
{
/* Still there */
@@ -889,16 +890,17 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate,
}
/*
* Downlink was not at the offset where it used to be. Scan the page
* to find it. During normal gist insertions, it might've moved to another
* page, to the right, but during a buffering build, we keep track of
* the parent of each page in the lookup table so we should always know
* what page it's on.
* Downlink was not at the offset where it used to be. Scan the page to
* find it. During normal gist insertions, it might've moved to another
* page, to the right, but during a buffering build, we keep track of the
* parent of each page in the lookup table so we should always know what
* page it's on.
*/
for (off = FirstOffsetNumber; off <= maxoff; off = OffsetNumberNext(off))
{
ItemId iid = PageGetItemId(page, off);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == childblkno)
{
/* yes!!, found it */
@@ -1181,6 +1183,7 @@ gistMemorizeAllDownlinks(GISTBuildState *buildstate, Buffer parentbuf)
ItemId iid = PageGetItemId(page, off);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
BlockNumber childblkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
gistMemorizeParent(buildstate, childblkno, parentblkno);
}
}


@@ -581,8 +581,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *gist
if (v->spl_equiv == NULL)
{
/*
* simple case: left and right keys for attno column are
* equal
* simple case: left and right keys for attno column are equal
*/
gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno + 1);
}


@@ -224,8 +224,8 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
/*
* Be sure to check for interrupts at least once per page. Checks at
* higher code levels won't be able to stop a seqscan that encounters
* many pages' worth of consecutive dead tuples.
* higher code levels won't be able to stop a seqscan that encounters many
* pages' worth of consecutive dead tuples.
*/
CHECK_FOR_INTERRUPTS();
@@ -1590,8 +1590,8 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
* When first_call is true (and thus, skip is initially false) we'll
* return the first tuple we find. But on later passes, heapTuple
* will initially be pointing to the tuple we returned last time.
* Returning it again would be incorrect (and would loop forever),
* so we skip it and return the next match we find.
* Returning it again would be incorrect (and would loop forever), so
* we skip it and return the next match we find.
*/
if (!skip)
{
@@ -1885,14 +1885,14 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
/*
* We're about to do the actual insert -- but check for conflict first,
* to avoid possibly having to roll back work we've just done.
* We're about to do the actual insert -- but check for conflict first, to
* avoid possibly having to roll back work we've just done.
*
* For a heap insert, we only need to check for table-level SSI locks.
* Our new tuple can't possibly conflict with existing tuple locks, and
* heap page locks are only consolidated versions of tuple locks; they do
* not lock "gaps" as index page locks do. So we don't need to identify
* a buffer before making the call.
* For a heap insert, we only need to check for table-level SSI locks. Our
* new tuple can't possibly conflict with existing tuple locks, and heap
* page locks are only consolidated versions of tuple locks; they do not
* lock "gaps" as index page locks do. So we don't need to identify a
* buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@@ -2123,11 +2123,11 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
* We're about to do the actual inserts -- but check for conflict first,
* to avoid possibly having to roll back work we've just done.
*
* For a heap insert, we only need to check for table-level SSI locks.
* Our new tuple can't possibly conflict with existing tuple locks, and
* heap page locks are only consolidated versions of tuple locks; they do
* not lock "gaps" as index page locks do. So we don't need to identify
* a buffer before making the call.
* For a heap insert, we only need to check for table-level SSI locks. Our
* new tuple can't possibly conflict with existing tuple locks, and heap
* page locks are only consolidated versions of tuple locks; they do not
* lock "gaps" as index page locks do. So we don't need to identify a
* buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@@ -2140,9 +2140,8 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
int nthispage;
/*
* Find buffer where at least the next tuple will fit. If the page
* is all-visible, this will also pin the requisite visibility map
* page.
* Find buffer where at least the next tuple will fit. If the page is
* all-visible, this will also pin the requisite visibility map page.
*/
buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
InvalidBuffer, options, bistate,
@@ -2372,10 +2371,10 @@ heap_delete(Relation relation, ItemPointer tid,
page = BufferGetPage(buffer);
/*
* Before locking the buffer, pin the visibility map page if it appears
* to be necessary. Since we haven't got the lock yet, someone else might
* be in the middle of changing this, so we'll need to recheck after
* we have the lock.
* Before locking the buffer, pin the visibility map page if it appears to
* be necessary. Since we haven't got the lock yet, someone else might be
* in the middle of changing this, so we'll need to recheck after we have
* the lock.
*/
if (PageIsAllVisible(page))
visibilitymap_pin(relation, block, &vmbuffer);
@@ -2753,10 +2752,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
page = BufferGetPage(buffer);
/*
* Before locking the buffer, pin the visibility map page if it appears
* to be necessary. Since we haven't got the lock yet, someone else might
* be in the middle of changing this, so we'll need to recheck after
* we have the lock.
* Before locking the buffer, pin the visibility map page if it appears to
* be necessary. Since we haven't got the lock yet, someone else might be
* in the middle of changing this, so we'll need to recheck after we have
* the lock.
*/
if (PageIsAllVisible(page))
visibilitymap_pin(relation, block, &vmbuffer);
@@ -2900,11 +2899,11 @@ l2:
/*
* If we didn't pin the visibility map page and the page has become all
* visible while we were busy locking the buffer, or during some subsequent
* window during which we had it unlocked, we'll have to unlock and
* re-lock, to avoid holding the buffer lock across an I/O. That's a bit
* unfortunate, esepecially since we'll now have to recheck whether the
* tuple has been locked or updated under us, but hopefully it won't
* visible while we were busy locking the buffer, or during some
* subsequent window during which we had it unlocked, we'll have to unlock
* and re-lock, to avoid holding the buffer lock across an I/O. That's a
* bit unfortunate, esepecially since we'll now have to recheck whether
* the tuple has been locked or updated under us, but hopefully it won't
* happen very often.
*/
if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
@@ -3196,11 +3195,11 @@ l2:
/*
* Mark old tuple for invalidation from system caches at next command
* boundary, and mark the new tuple for invalidation in case we abort.
* We have to do this before releasing the buffer because oldtup is in
* the buffer. (heaptup is all in local memory, but it's necessary to
* process both tuple versions in one call to inval.c so we can avoid
* redundant sinval messages.)
* boundary, and mark the new tuple for invalidation in case we abort. We
* have to do this before releasing the buffer because oldtup is in the
* buffer. (heaptup is all in local memory, but it's necessary to process
* both tuple versions in one call to inval.c so we can avoid redundant
* sinval messages.)
*/
CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
@@ -4731,17 +4730,16 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
* We don't bump the LSN of the heap page when setting the visibility
* map bit, because that would generate an unworkable volume of
* full-page writes. This exposes us to torn page hazards, but since
* we're not inspecting the existing page contents in any way, we
* don't care.
* We don't bump the LSN of the heap page when setting the visibility map
* bit, because that would generate an unworkable volume of full-page
* writes. This exposes us to torn page hazards, but since we're not
* inspecting the existing page contents in any way, we don't care.
*
* However, all operations that clear the visibility map bit *do* bump
* the LSN, and those operations will only be replayed if the XLOG LSN
* follows the page LSN. Thus, if the page LSN has advanced past our
* XLOG record's LSN, we mustn't mark the page all-visible, because
* the subsequent update won't be replayed to clear the flag.
* However, all operations that clear the visibility map bit *do* bump the
* LSN, and those operations will only be replayed if the XLOG LSN follows
* the page LSN. Thus, if the page LSN has advanced past our XLOG
* record's LSN, we mustn't mark the page all-visible, because the
* subsequent update won't be replayed to clear the flag.
*/
if (!XLByteLE(lsn, PageGetLSN(page)))
{
@@ -4772,10 +4770,10 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
* Don't set the bit if replay has already passed this point.
*
* It might be safe to do this unconditionally; if replay has past
* this point, we'll replay at least as far this time as we did before,
* and if this bit needs to be cleared, the record responsible for
* doing so should be again replayed, and clear it. For right now,
* out of an abundance of conservatism, we use the same test here
* this point, we'll replay at least as far this time as we did
* before, and if this bit needs to be cleared, the record responsible
* for doing so should be again replayed, and clear it. For right
* now, out of an abundance of conservatism, we use the same test here
* we did for the heap page; if this results in a dropped bit, no real
* harm is done; and the next VACUUM will fix it.
*/


@@ -302,11 +302,11 @@ RelationGetBufferForTuple(Relation relation, Size len,
* block if one was given, taking suitable care with lock ordering and
* the possibility they are the same block.
*
* If the page-level all-visible flag is set, caller will need to clear
* both that and the corresponding visibility map bit. However, by the
* time we return, we'll have x-locked the buffer, and we don't want to
* do any I/O while in that state. So we check the bit here before
* taking the lock, and pin the page if it appears necessary.
* If the page-level all-visible flag is set, caller will need to
* clear both that and the corresponding visibility map bit. However,
* by the time we return, we'll have x-locked the buffer, and we don't
* want to do any I/O while in that state. So we check the bit here
* before taking the lock, and pin the page if it appears necessary.
* Checking without the lock creates a risk of getting the wrong
* answer, so we'll have to recheck after acquiring the lock.
*/
@@ -348,22 +348,23 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We now have the target page (and the other buffer, if any) pinned
* and locked. However, since our initial PageIsAllVisible checks
* were performed before acquiring the lock, the results might now
* be out of date, either for the selected victim buffer, or for the
* other buffer passed by the caller. In that case, we'll need to give
* up our locks, go get the pin(s) we failed to get earlier, and
* were performed before acquiring the lock, the results might now be
* out of date, either for the selected victim buffer, or for the
* other buffer passed by the caller. In that case, we'll need to
* give up our locks, go get the pin(s) we failed to get earlier, and
* re-lock. That's pretty painful, but hopefully shouldn't happen
* often.
*
* Note that there's a small possibility that we didn't pin the
* page above but still have the correct page pinned anyway, either
* because we've already made a previous pass through this loop, or
* because caller passed us the right page anyway.
* Note that there's a small possibility that we didn't pin the page
* above but still have the correct page pinned anyway, either because
* we've already made a previous pass through this loop, or because
* caller passed us the right page anyway.
*
* Note also that it's possible that by the time we get the pin and
* retake the buffer locks, the visibility map bit will have been
* cleared by some other backend anyway. In that case, we'll have done
* a bit of extra work for no gain, but there's no real harm done.
* cleared by some other backend anyway. In that case, we'll have
* done a bit of extra work for no gain, but there's no real harm
* done.
*/
if (otherBuffer == InvalidBuffer || buffer <= otherBuffer)
GetVisibilityMapPins(relation, buffer, otherBuffer,

View File

@ -75,7 +75,7 @@ do { \
static void toast_delete_datum(Relation rel, Datum value);
static Datum toast_save_datum(Relation rel, Datum value,
struct varlena *oldexternal, int options);
struct varlena * oldexternal, int options);
static bool toastrel_valueid_exists(Relation toastrel, Oid valueid);
static bool toastid_valueid_exists(Oid toastrelid, Oid valueid);
static struct varlena *toast_fetch_datum(struct varlena * attr);
@ -1233,7 +1233,7 @@ toast_compress_datum(Datum value)
*/
static Datum
toast_save_datum(Relation rel, Datum value,
struct varlena *oldexternal, int options)
struct varlena * oldexternal, int options)
{
Relation toastrel;
Relation toastidx;

View File

@ -356,16 +356,16 @@ visibilitymap_count(Relation rel)
BlockNumber result = 0;
BlockNumber mapBlock;
for (mapBlock = 0; ; mapBlock++)
for (mapBlock = 0;; mapBlock++)
{
Buffer mapBuffer;
unsigned char *map;
int i;
/*
* Read till we fall off the end of the map. We assume that any
* extra bytes in the last page are zeroed, so we don't bother
* excluding them from the count.
* Read till we fall off the end of the map. We assume that any extra
* bytes in the last page are zeroed, so we don't bother excluding
* them from the count.
*/
mapBuffer = vm_readbuf(rel, mapBlock, false);
if (!BufferIsValid(mapBuffer))
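Inside that loop, counting is just a per-byte population count over the map page contents (a sketch assuming a simple bit loop; the real code may well use a lookup table instead):

map = (unsigned char *) PageGetContents(BufferGetPage(mapBuffer));
for (i = 0; i < MAPSIZE; i++)
{
    unsigned char mapbyte = map[i];

    /* each set bit represents one all-visible heap page */
    while (mapbyte != 0)
    {
        result += mapbyte & 1;
        mapbyte >>= 1;
    }
}
ReleaseBuffer(mapBuffer);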
@ -496,11 +496,11 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
Buffer buf;
/*
* We might not have opened the relation at the smgr level yet, or we might
* have been forced to close it by a sinval message. The code below won't
* necessarily notice relation extension immediately when extend = false,
* so we rely on sinval messages to ensure that our ideas about the size of
* the map aren't too far out of date.
* We might not have opened the relation at the smgr level yet, or we
* might have been forced to close it by a sinval message. The code below
* won't necessarily notice relation extension immediately when extend =
* false, so we rely on sinval messages to ensure that our ideas about the
* size of the map aren't too far out of date.
*/
RelationOpenSmgr(rel);

View File

@ -530,8 +530,8 @@ index_fetch_heap(IndexScanDesc scan)
if (got_heap_tuple)
{
/*
* Only in a non-MVCC snapshot can more than one member of the
* HOT chain be visible.
* Only in a non-MVCC snapshot can more than one member of the HOT
* chain be visible.
*/
scan->xs_continue_hot = !IsMVCCSnapshot(scan->xs_snapshot);
pgstat_count_heap_fetch(scan->indexRelation);

View File

@ -227,8 +227,8 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
}
/*
* Make a scan-lifespan context to hold array-associated data, or reset
* it if we already have one from a previous rescan cycle.
* Make a scan-lifespan context to hold array-associated data, or reset it
* if we already have one from a previous rescan cycle.
*/
if (so->arrayContext == NULL)
so->arrayContext = AllocSetContextCreate(CurrentMemoryContext,
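The else-branch completes the usual create-or-reset idiom (a sketch; the context name and the ALLOCSET_SMALL_* sizing are assumed here, being the customary choice for small scan-lifespan contexts):

if (so->arrayContext == NULL)
    so->arrayContext = AllocSetContextCreate(CurrentMemoryContext,
                                             "BTree array context",
                                             ALLOCSET_SMALL_MINSIZE,
                                             ALLOCSET_SMALL_INITSIZE,
                                             ALLOCSET_SMALL_MAXSIZE);
else
    MemoryContextReset(so->arrayContext);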
@ -387,9 +387,10 @@ _bt_find_extreme_element(IndexScanDesc scan, ScanKey skey,
/*
* Look up the appropriate comparison operator in the opfamily.
*
* Note: it's possible that this would fail, if the opfamily is incomplete,
* but it seems quite unlikely that an opfamily would omit non-cross-type
* comparison operators for any datatype that it supports at all.
* Note: it's possible that this would fail, if the opfamily is
* incomplete, but it seems quite unlikely that an opfamily would omit
* non-cross-type comparison operators for any datatype that it supports
* at all.
*/
cmp_op = get_opfamily_member(rel->rd_opfamily[skey->sk_attno - 1],
elemtype,
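The lookup then finishes with the usual is-it-there check (a sketch; strat stands for whichever comparison strategy number the caller needs and is assumed from the elided arguments):

cmp_op = get_opfamily_member(rel->rd_opfamily[skey->sk_attno - 1],
                             elemtype,
                             elemtype,
                             strat);
if (!OidIsValid(cmp_op))
    elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
         strat, elemtype, elemtype,
         rel->rd_opfamily[skey->sk_attno - 1]);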
@ -455,9 +456,10 @@ _bt_sort_array_elements(IndexScanDesc scan, ScanKey skey,
/*
* Look up the appropriate comparison function in the opfamily.
*
* Note: it's possible that this would fail, if the opfamily is incomplete,
* but it seems quite unlikely that an opfamily would omit non-cross-type
* support functions for any datatype that it supports at all.
* Note: it's possible that this would fail, if the opfamily is
* incomplete, but it seems quite unlikely that an opfamily would omit
* non-cross-type support functions for any datatype that it supports at
* all.
*/
cmp_proc = get_opfamily_proc(rel->rd_opfamily[skey->sk_attno - 1],
elemtype,
@ -781,8 +783,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
* set qual_ok to false and abandon further processing.
*
* We also have to deal with the case of "key IS NULL", which is
* unsatisfiable in combination with any other index condition.
* By the time we get here, that's been classified as an equality
* unsatisfiable in combination with any other index condition. By
* the time we get here, that's been classified as an equality
* check, and we've rejected any combination of it with a regular
* equality condition; but not with other types of conditions.
*/
@ -1424,9 +1426,9 @@ _bt_checkkeys(IndexScanDesc scan,
* index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass.
* On a forward scan, however, we must keep going, because we
* may have initially positioned to the start of the index.
* because it's not possible for any future tuples to pass. On
* a forward scan, however, we must keep going, because we may
* have initially positioned to the start of the index.
*/
if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
ScanDirectionIsBackward(dir))
@ -1440,8 +1442,8 @@ _bt_checkkeys(IndexScanDesc scan,
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass.
* On a backward scan, however, we must keep going, because we
* because it's not possible for any future tuples to pass. On
* a backward scan, however, we must keep going, because we
* may have initially positioned to the end of the index.
*/
if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
@ -1535,9 +1537,9 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass.
* On a forward scan, however, we must keep going, because we
* may have initially positioned to the start of the index.
* because it's not possible for any future tuples to pass. On
* a forward scan, however, we must keep going, because we may
* have initially positioned to the start of the index.
*/
if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
ScanDirectionIsBackward(dir))
@ -1551,8 +1553,8 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass.
* On a backward scan, however, we must keep going, because we
* because it's not possible for any future tuples to pass. On
* a backward scan, however, we must keep going, because we
* may have initially positioned to the end of the index.
*/
if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&

View File

@ -250,9 +250,9 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
else
{
/*
* Tuple must be inserted into existing chain. We mustn't change
* the chain's head address, but we don't need to chase the entire
* chain to put the tuple at the end; we can insert it second.
* Tuple must be inserted into existing chain. We mustn't change the
* chain's head address, but we don't need to chase the entire chain
* to put the tuple at the end; we can insert it second.
*
* Also, it's possible that the "chain" consists only of a DEAD tuple,
* in which case we should replace the DEAD tuple in-place.
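The "insert it second" step is ordinary singly-linked-list splicing within the page (a sketch under assumed names; SPGiST leaf tuples chain through a nextOffset field, and the head must be re-fetched because adding an item can shuffle the page contents):

head = (SpGistLeafTuple) PageGetItem(current->page,
                    PageGetItemId(current->page, current->offnum));
/* new tuple inherits the head's successor ... */
leafTuple->nextOffset = head->nextOffset;
offnum = SpGistPageAddNewItem(state, current->page,
                              (Item) leafTuple, leafTuple->size,
                              NULL, false);
/* ... and the head is re-pointed at the new tuple */
head = (SpGistLeafTuple) PageGetItem(current->page,
                    PageGetItemId(current->page, current->offnum));
head->nextOffset = offnum;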
@ -516,9 +516,9 @@ moveLeafs(Relation index, SpGistState *state,
leafptr += newLeafTuple->size;
/*
* Now delete the old tuples, leaving a redirection pointer behind for
* the first one, unless we're doing an index build, in which case there
* can't be any concurrent scan so we need not provide a redirect.
* Now delete the old tuples, leaving a redirection pointer behind for the
* first one, unless we're doing an index build, in which case there can't
* be any concurrent scan so we need not provide a redirect.
*/
spgPageIndexMultiDelete(state, current->page, toDelete, nDelete,
state->isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
@ -754,8 +754,8 @@ doPickSplit(Relation index, SpGistState *state,
{
/*
* We are splitting the root (which up to now is also a leaf page).
* Its tuples are not linked, so scan sequentially to get them all.
* We ignore the original value of current->offnum.
* Its tuples are not linked, so scan sequentially to get them all. We
* ignore the original value of current->offnum.
*/
for (i = FirstOffsetNumber; i <= max; i++)
{
@ -924,8 +924,8 @@ doPickSplit(Relation index, SpGistState *state,
innerTuple->allTheSame = allTheSame;
/*
* Update nodes[] array to point into the newly formed innerTuple, so
* that we can adjust their downlinks below.
* Update nodes[] array to point into the newly formed innerTuple, so that
* we can adjust their downlinks below.
*/
SGITITERATE(innerTuple, i, node)
{
@ -944,13 +944,13 @@ doPickSplit(Relation index, SpGistState *state,
}
/*
* To perform the split, we must insert a new inner tuple, which can't
* go on a leaf page; and unless we are splitting the root page, we
* must then update the parent tuple's downlink to point to the inner
* tuple. If there is room, we'll put the new inner tuple on the same
* page as the parent tuple, otherwise we need another non-leaf buffer.
* But if the parent page is the root, we can't add the new inner tuple
* there, because the root page must have only one inner tuple.
* To perform the split, we must insert a new inner tuple, which can't go
* on a leaf page; and unless we are splitting the root page, we must then
* update the parent tuple's downlink to point to the inner tuple. If
* there is room, we'll put the new inner tuple on the same page as the
* parent tuple, otherwise we need another non-leaf buffer. But if the
* parent page is the root, we can't add the new inner tuple there,
* because the root page must have only one inner tuple.
*/
xlrec.initInner = false;
if (parent->buffer != InvalidBuffer &&
@ -977,22 +977,22 @@ doPickSplit(Relation index, SpGistState *state,
}
/*
* Because a WAL record can't involve more than four buffers, we can
* only afford to deal with two leaf pages in each picksplit action,
* ie the current page and at most one other.
* Because a WAL record can't involve more than four buffers, we can only
* afford to deal with two leaf pages in each picksplit action, ie the
* current page and at most one other.
*
* The new leaf tuples converted from the existing ones should require
* the same or less space, and therefore should all fit onto one page
* The new leaf tuples converted from the existing ones should require the
* same or less space, and therefore should all fit onto one page
* (although that's not necessarily the current page, since we can't
* delete the old tuples but only replace them with placeholders).
* However, the incoming new tuple might not also fit, in which case
* we might need another picksplit cycle to reduce it some more.
* However, the incoming new tuple might not also fit, in which case we
* might need another picksplit cycle to reduce it some more.
*
* If there's not room to put everything back onto the current page,
* then we decide on a per-node basis which tuples go to the new page.
* (We do it like that because leaf tuple chains can't cross pages,
* so we must place all leaf tuples belonging to the same parent node
* on the same page.)
* If there's not room to put everything back onto the current page, then
* we decide on a per-node basis which tuples go to the new page. (We do
* it like that because leaf tuple chains can't cross pages, so we must
* place all leaf tuples belonging to the same parent node on the same
* page.)
*
* If we are splitting the root page (turning it from a leaf page into an
* inner page), then no leaf tuples can go back to the current page; they
@ -1041,6 +1041,7 @@ doPickSplit(Relation index, SpGistState *state,
Min(totalLeafSizes,
SPGIST_PAGE_CAPACITY),
&xlrec.initDest);
/*
* Attempt to assign node groups to the two pages. We might fail to
* do so, even if totalLeafSizes is less than the available space,
@ -1584,8 +1585,8 @@ spgAddNodeAction(Relation index, SpGistState *state,
xlrec.nodeI = parent->node;
/*
* obtain new buffer with the same parity as current, since it will
* be a child of same parent tuple
* obtain new buffer with the same parity as current, since it will be
* a child of same parent tuple
*/
current->buffer = SpGistGetBuffer(index,
GBUF_INNER_PARITY(current->blkno),
@ -1597,12 +1598,12 @@ spgAddNodeAction(Relation index, SpGistState *state,
xlrec.blknoNew = current->blkno;
/*
* Let's just make real sure new current isn't same as old. Right
* now that's impossible, but if SpGistGetBuffer ever got smart enough
* to delete placeholder tuples before checking space, maybe it
* wouldn't be impossible. The case would appear to work except that
* WAL replay would be subtly wrong, so I think a mere assert isn't
* enough here.
* Let's just make real sure new current isn't same as old. Right now
* that's impossible, but if SpGistGetBuffer ever got smart enough to
* delete placeholder tuples before checking space, maybe it wouldn't
* be impossible. The case would appear to work except that WAL
* replay would be subtly wrong, so I think a mere assert isn't enough
* here.
*/
if (xlrec.blknoNew == xlrec.blkno)
elog(ERROR, "SPGiST new buffer shouldn't be same as old buffer");
@ -1707,9 +1708,9 @@ spgSplitNodeAction(Relation index, SpGistState *state,
Assert(!SpGistPageStoresNulls(current->page));
/*
* Construct new prefix tuple, containing a single node with the
* specified label. (We'll update the node's downlink to point to the
* new postfix tuple, below.)
* Construct new prefix tuple, containing a single node with the specified
* label. (We'll update the node's downlink to point to the new postfix
* tuple, below.)
*/
node = spgFormNodeTuple(state, out->result.splitTuple.nodeLabel, false);
@ -2077,8 +2078,8 @@ spgdoinsert(Relation index, SpGistState *state,
}
/*
* Loop around and attempt to insert the new leafDatum
* at "current" (which might reference an existing child
* Loop around and attempt to insert the new leafDatum at
* "current" (which might reference an existing child
* tuple, or might be invalid to force us to find a new
* page for the tuple).
*
@ -2102,8 +2103,8 @@ spgdoinsert(Relation index, SpGistState *state,
out.result.addNode.nodeLabel);
/*
* Retry insertion into the enlarged node. We assume
* that we'll get a MatchNode result this time.
* Retry insertion into the enlarged node. We assume that
* we'll get a MatchNode result this time.
*/
goto process_inner_tuple;
break;

View File

@ -135,12 +135,12 @@ spg_kd_picksplit(PG_FUNCTION_ARGS)
/*
* Note: points that have coordinates exactly equal to coord may get
* classified into either node, depending on where they happen to fall
* in the sorted list. This is okay as long as the inner_consistent
* function descends into both sides for such cases. This is better
* than the alternative of trying to have an exact boundary, because
* it keeps the tree balanced even when we have many instances of the
* same point value. So we should never trigger the allTheSame logic.
* classified into either node, depending on where they happen to fall in
* the sorted list. This is okay as long as the inner_consistent function
* descends into both sides for such cases. This is better than the
* alternative of trying to have an exact boundary, because it keeps the
* tree balanced even when we have many instances of the same point value.
* So we should never trigger the allTheSame logic.
*/
for (i = 0; i < in->nTuples; i++)
{
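The matching obligation on the inner_consistent side is to descend both ways for boundary values, roughly (a hypothetical fragment, not the real spg_kd_inner_consistent body; note the deliberately non-strict comparisons):

/* descend below the split plane whenever the query could be there */
if (query_coord <= coord)
    out->nodeNumbers[out->nNodes++] = 0;
/* and also above: a point exactly on the boundary may sit in either node */
if (query_coord >= coord)
    out->nodeNumbers[out->nNodes++] = 1;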

View File

@ -177,6 +177,7 @@ spgbeginscan(PG_FUNCTION_ARGS)
{
Relation rel = (Relation) PG_GETARG_POINTER(0);
int keysz = PG_GETARG_INT32(1);
/* ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2); */
IndexScanDesc scan;
SpGistScanOpaque so;

View File

@ -209,9 +209,9 @@ spg_text_choose(PG_FUNCTION_ARGS)
{
/*
* Descend to existing node. (If in->allTheSame, the core code will
* ignore our nodeN specification here, but that's OK. We still
* have to provide the correct levelAdd and restDatum values, and
* those are the same regardless of which node gets chosen by core.)
* ignore our nodeN specification here, but that's OK. We still have
* to provide the correct levelAdd and restDatum values, and those are
* the same regardless of which node gets chosen by core.)
*/
out->resultType = spgMatchNode;
out->result.matchNode.nodeN = i;
@ -227,10 +227,10 @@ spg_text_choose(PG_FUNCTION_ARGS)
else if (in->allTheSame)
{
/*
* Can't use AddNode action, so split the tuple. The upper tuple
* has the same prefix as before and uses an empty node label for
* the lower tuple. The lower tuple has no prefix and the same
* node labels as the original tuple.
* Can't use AddNode action, so split the tuple. The upper tuple has
* the same prefix as before and uses an empty node label for the
* lower tuple. The lower tuple has no prefix and the same node
* labels as the original tuple.
*/
out->resultType = spgSplitTuple;
out->result.splitTuple.prefixHasPrefix = in->hasPrefix;

View File

@ -276,8 +276,8 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
else if (prevLive == InvalidOffsetNumber)
{
/*
* This is the first live tuple in the chain. It has
* to move to the head position.
* This is the first live tuple in the chain. It has to move
* to the head position.
*/
moveSrc[xlrec.nMove] = j;
moveDest[xlrec.nMove] = i;
@ -353,11 +353,11 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
InvalidBlockNumber, InvalidOffsetNumber);
/*
* We implement the move step by swapping the item pointers of the
* source and target tuples, then replacing the newly-source tuples
* with placeholders. This is perhaps unduly friendly with the page
* data representation, but it's fast and doesn't risk page overflow
* when a tuple to be relocated is large.
* We implement the move step by swapping the item pointers of the source
* and target tuples, then replacing the newly-source tuples with
* placeholders. This is perhaps unduly friendly with the page data
* representation, but it's fast and doesn't risk page overflow when a
* tuple to be relocated is large.
*/
for (i = 0; i < xlrec.nMove; i++)
{
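The swap itself is plain struct assignment on the line-pointer array (a sketch of the loop body; page is assumed to be the leaf page being vacuumed):

ItemId      idSrc = PageGetItemId(page, moveSrc[i]);
ItemId      idDest = PageGetItemId(page, moveDest[i]);
ItemIdData  tmp;

/* exchange the two line pointers; the tuple bodies stay where they are */
tmp = *idSrc;
*idSrc = *idDest;
*idDest = tmp;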
@ -651,9 +651,9 @@ spgvacuumpage(spgBulkDeleteState *bds, BlockNumber blkno)
/*
* The root pages must never be deleted, nor marked as available in FSM,
* because we don't want them ever returned by a search for a place to
* put a new tuple. Otherwise, check for empty/deletable page, and
* make sure FSM knows about it.
* because we don't want them ever returned by a search for a place to put
* a new tuple. Otherwise, check for empty/deletable page, and make sure
* FSM knows about it.
*/
if (!SpGistBlockIsRoot(blkno))
{
@ -741,11 +741,11 @@ spgprocesspending(spgBulkDeleteState *bds)
else
{
/*
* On an inner page, visit the referenced inner tuple and add
* all its downlinks to the pending list. We might have pending
* items for more than one inner tuple on the same page (in fact
* this is pretty likely given the way space allocation works),
* so get them all while we are here.
* On an inner page, visit the referenced inner tuple and add all
* its downlinks to the pending list. We might have pending items
* for more than one inner tuple on the same page (in fact this is
* pretty likely given the way space allocation works), so get
* them all while we are here.
*/
for (nitem = pitem; nitem != NULL; nitem = nitem->next)
{
@ -825,8 +825,8 @@ spgvacuumscan(spgBulkDeleteState *bds)
* physical order (we hope the kernel will cooperate in providing
* read-ahead for speed). It is critical that we visit all leaf pages,
* including ones added after we start the scan, else we might fail to
* delete some deletable tuples. See more extensive comments about
* this in btvacuumscan().
* delete some deletable tuples. See more extensive comments about this
* in btvacuumscan().
*/
blkno = SPGIST_METAPAGE_BLKNO + 1;
for (;;)
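In outline, that loop re-reads the relation length on every pass so that pages added mid-scan are visited too (a condensed sketch; index and num_pages are assumed from the surrounding function):

blkno = SPGIST_METAPAGE_BLKNO + 1;
for (;;)
{
    /* re-check the length each time to pick up newly added pages */
    num_pages = RelationGetNumberOfBlocks(index);
    if (blkno >= num_pages)
        break;
    for (; blkno < num_pages; blkno++)
        spgvacuumpage(bds, blkno);
}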

View File

@ -587,8 +587,8 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
{
/*
* We have it a bit easier here than in doPickSplit(),
* because we know the inner tuple's location already,
* so we can inject the correct redirection tuple now.
* because we know the inner tuple's location already, so
* we can inject the correct redirection tuple now.
*/
if (!state.isBuild)
spgPageIndexMultiDelete(&state, page,

View File

@ -927,8 +927,8 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
* other choice: a read-busy slot will not be least recently used once
* the read finishes, and waiting for an I/O on a write-busy slot is
* inferior to just picking some other slot. Testing shows the slot
* we pick instead will often be clean, allowing us to begin a read
* at once.
* we pick instead will often be clean, allowing us to begin a read at
* once.
*
* Normally the page_lru_count values will all be different and so
* there will be a well-defined LRU page. But since we allow
@ -997,10 +997,10 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* If all pages (except possibly the latest one) are I/O busy, we'll
* have to wait for an I/O to complete and then retry. In that unhappy
* case, we choose to wait for the I/O on the least recently used slot,
* on the assumption that it was likely initiated first of all the I/Os
* in progress and may therefore finish first.
* have to wait for an I/O to complete and then retry. In that
* unhappy case, we choose to wait for the I/O on the least recently
* used slot, on the assumption that it was likely initiated first of
* all the I/Os in progress and may therefore finish first.
*/
if (best_valid_delta < 0)
{
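The selection pass itself can be sketched as follows (simplified; the real function also tracks the best writable slot separately and special-cases the latest page):

int         cur_count = (shared->cur_lru_count)++;
int         bestslot = -1;
int         best_delta = -1;
int         slotno;

for (slotno = 0; slotno < shared->num_slots; slotno++)
{
    int         this_delta;

    if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
        return slotno;          /* an empty slot is always the best choice */

    this_delta = cur_count - shared->page_lru_count[slotno];
    if (this_delta < 0)
        this_delta = 0;         /* clamp against counter wraparound */

    /* consider only slots with no I/O in progress */
    if (shared->page_status[slotno] == SLRU_PAGE_VALID &&
        this_delta > best_delta)
    {
        bestslot = slotno;
        best_delta = this_delta;
    }
}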

View File

@ -362,6 +362,7 @@ GXactLoadSubxactData(GlobalTransaction gxact, int nsubxacts,
{
PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
/* We need no extra lock since the GXACT isn't valid yet */
if (nsubxacts > PGPROC_MAX_CACHED_SUBXIDS)
{
@ -1063,9 +1064,9 @@ EndPrepare(GlobalTransaction gxact)
errmsg("could not close two-phase state file: %m")));
/*
* Mark the prepared transaction as valid. As soon as xact.c marks MyPgXact
* as not running our XID (which it will do immediately after this
* function returns), others can commit/rollback the xact.
* Mark the prepared transaction as valid. As soon as xact.c marks
* MyPgXact as not running our XID (which it will do immediately after
* this function returns), others can commit/rollback the xact.
*
* NB: a side effect of this is to make a dummy ProcArray entry for the
* prepared XID. This must happen before we clear the XID from MyPgXact,

View File

@ -174,8 +174,8 @@ GetNewTransactionId(bool isSubXact)
* latestCompletedXid is present in the ProcArray, which is essential for
* correct OldestXmin tracking; see src/backend/access/transam/README.
*
* XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we are
* relying on fetch/store of an xid to be atomic, else other backends
* XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we
* are relying on fetch/store of an xid to be atomic, else other backends
* might see a partially-set xid here. But holding both locks at once
* would be a nasty concurrency hit. So for now, assume atomicity.
*

View File

@ -1019,6 +1019,7 @@ RecordTransactionCommit(void)
XLogRecData rdata[4];
int lastrdata = 0;
xl_xact_commit xlrec;
/*
* Set flags required for recovery processing of commits.
*/
@ -1074,6 +1075,7 @@ RecordTransactionCommit(void)
XLogRecData rdata[2];
int lastrdata = 0;
xl_xact_commit_compact xlrec;
xlrec.xact_time = xactStopTimestamp;
xlrec.nsubxacts = nchildren;
rdata[0].data = (char *) (&xlrec);
@ -2602,10 +2604,10 @@ CommitTransactionCommand(void)
break;
/*
* We were issued a RELEASE command, so we end the
* current subtransaction and return to the parent transaction.
* The parent might be ended too, so repeat till we find an
* INPROGRESS transaction or subtransaction.
* We were issued a RELEASE command, so we end the current
* subtransaction and return to the parent transaction. The parent
* might be ended too, so repeat till we find an INPROGRESS
* transaction or subtransaction.
*/
case TBLOCK_SUBRELEASE:
do
@ -2623,9 +2625,9 @@ CommitTransactionCommand(void)
* hierarchy and perform final commit. We do this by rolling up
* any subtransactions into their parent, which leads to O(N^2)
* operations with respect to resource owners - this isn't that
* bad until we approach thousands of savepoints but is necessary
* for correctness should after triggers create new resource
* owners.
* bad until we approach thousands of savepoints but is
* necessary for correctness should after triggers create new
* resource owners.
*/
case TBLOCK_SUBCOMMIT:
do
@ -4659,6 +4661,7 @@ xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn,
XLogFlush(lsn);
}
/*
* Utility function to call xact_redo_commit_internal after breaking down xlrec
*/

View File

@ -344,10 +344,10 @@ typedef struct XLogCtlInsert
/*
* fullPageWrites is the master copy used by all backends to determine
* whether to write full-page images to WAL, instead of using the
* process-local one. This is required because, when full_page_writes
* is changed by SIGHUP, we must WAL-log it before it actually affects
* WAL-logging by backends. Checkpointer sets it at startup or after SIGHUP.
* whether to write full-page images to WAL, instead of using the process-local
* one. This is required because, when full_page_writes is changed by SIGHUP,
* we must WAL-log it before it actually affects WAL-logging by backends.
* Checkpointer sets it at startup or after SIGHUP.
*/
bool fullPageWrites;
@ -455,8 +455,11 @@ typedef struct XLogCtlData
XLogRecPtr recoveryLastRecPtr;
/* timestamp of last COMMIT/ABORT record replayed (or being replayed) */
TimestampTz recoveryLastXTime;
/* timestamp of when we started replaying the current chunk of WAL data,
* only relevant for replication or archive recovery */
/*
* timestamp of when we started replaying the current chunk of WAL data,
* only relevant for replication or archive recovery
*/
TimestampTz currentChunkStartTime;
/* end of the last record restored from the archive */
XLogRecPtr restoreLastRecPtr;
@ -750,8 +753,8 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* insert lock, but it seems better to avoid doing CRC calculations while
* holding the lock.
*
* We add entries for backup blocks to the chain, so that they don't
* need any special treatment in the critical section where the chunks are
* We add entries for backup blocks to the chain, so that they don't need
* any special treatment in the critical section where the chunks are
* copied into the WAL buffers. Those entries have to be unlinked from the
* chain if we have to loop back here.
*/
@ -896,10 +899,10 @@ begin:;
/*
* Calculate CRC of the data, including all the backup blocks
*
* Note that the record header isn't added into the CRC initially since
* we don't know the prev-link yet. Thus, the CRC will represent the CRC
* of the whole record in the order: rdata, then backup blocks, then
* record header.
* Note that the record header isn't added into the CRC initially since we
* don't know the prev-link yet. Thus, the CRC will represent the CRC of
* the whole record in the order: rdata, then backup blocks, then record
* header.
*/
INIT_CRC32(rdata_crc);
for (rdt = rdata; rdt != NULL; rdt = rdt->next)
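Once the prev-link is finally known, the header is folded in and the CRC finalized, roughly (a sketch; the CRC field itself is skipped, it being the first field of the record header):

record->xl_prev = Insert->PrevRecord;
COMP_CRC32(rdata_crc, ((char *) record) + sizeof(pg_crc32),
           SizeOfXLogRecord - sizeof(pg_crc32));
FIN_CRC32(rdata_crc);
record->xl_crc = rdata_crc;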
@ -948,10 +951,10 @@ begin:;
}
/*
* Also check to see if fullPageWrites or forcePageWrites was just turned on;
* if we weren't already doing full-page writes then go back and recompute.
* (If it was just turned off, we could recompute the record without full pages,
* but we choose not to bother.)
* Also check to see if fullPageWrites or forcePageWrites was just turned
* on; if we weren't already doing full-page writes then go back and
* recompute. (If it was just turned off, we could recompute the record
* without full pages, but we choose not to bother.)
*/
if ((Insert->fullPageWrites || Insert->forcePageWrites) && !doPageWrites)
{
@ -1583,7 +1586,7 @@ AdvanceXLInsertBuffer(bool new_segment)
* would miss the opportunity to compress a few records.
*/
if (!Insert->forcePageWrites)
NewPage->xlp_info |= XLP_BKP_REMOVABLE;
NewPage ->xlp_info |= XLP_BKP_REMOVABLE;
/*
* If first page of an XLOG segment file, make it a long header.
@ -1827,11 +1830,11 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
Write->lastSegSwitchTime = (pg_time_t) time(NULL);
/*
* Request a checkpoint if we've consumed too
* much xlog since the last one. For speed, we first check
* using the local copy of RedoRecPtr, which might be out of
* date; if it looks like a checkpoint is needed, forcibly
* update RedoRecPtr and recheck.
* Request a checkpoint if we've consumed too much xlog since
* the last one. For speed, we first check using the local
* copy of RedoRecPtr, which might be out of date; if it looks
* like a checkpoint is needed, forcibly update RedoRecPtr and
* recheck.
*/
if (IsUnderPostmaster &&
XLogCheckpointNeeded(openLogId, openLogSeg))
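The cheap-check-then-recheck comes out as (a sketch restating the test shown above together with its body):

if (IsUnderPostmaster &&
    XLogCheckpointNeeded(openLogId, openLogSeg))
{
    /* forcibly update the stale local copy of RedoRecPtr, then retest */
    (void) GetRedoRecPtr();
    if (XLogCheckpointNeeded(openLogId, openLogSeg))
        RequestCheckpoint(CHECKPOINT_CAUSE_XLOG);
}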
@ -1945,9 +1948,9 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
}
/*
* Nudge the WALWriter: it has a full page of WAL to write, or we want
* it to come out of low-power mode so that this async commit will reach
* disk within the expected amount of time.
* Nudge the WALWriter: it has a full page of WAL to write, or we want it
* to come out of low-power mode so that this async commit will reach disk
* within the expected amount of time.
*/
if (ProcGlobal->walwriterLatch)
SetLatch(ProcGlobal->walwriterLatch);
@ -2076,8 +2079,8 @@ XLogFlush(XLogRecPtr record)
WriteRqstPtr = record;
/*
* Now wait until we get the write lock, or someone else does the
* flush for us.
* Now wait until we get the write lock, or someone else does the flush
* for us.
*/
for (;;)
{
@ -2273,7 +2276,8 @@ XLogBackgroundFlush(void)
/*
* If we wrote something then we have something to send to standbys also,
* otherwise the replication delay becomes around 7s with just async commit.
* otherwise the replication delay becomes around 7s with just async
* commit.
*/
if (wrote_something)
WalSndWakeup();
@ -2776,8 +2780,8 @@ XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
}
/*
* If the segment was fetched from archival storage, replace
* the existing xlog segment (if any) with the archival version.
* If the segment was fetched from archival storage, replace the existing
* xlog segment (if any) with the archival version.
*/
if (source == XLOG_FROM_ARCHIVE)
{
@ -5576,13 +5580,13 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
}
/*
* If we are establishing a new timeline, we have to copy data from
* the last WAL segment of the old timeline to create a starting WAL
* segment for the new timeline.
* If we are establishing a new timeline, we have to copy data from the
* last WAL segment of the old timeline to create a starting WAL segment
* for the new timeline.
*
* Notify the archiver that the last WAL segment of the old timeline
* is ready to copy to archival storage. Otherwise, it is not archived
* for a while.
* Notify the archiver that the last WAL segment of the old timeline is
* ready to copy to archival storage. Otherwise, it is not archived for a
* while.
*/
if (endTLI != ThisTimeLineID)
{
@ -5604,8 +5608,8 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
XLogArchiveCleanup(xlogpath);
/*
* Since there might be a partial WAL segment named RECOVERYXLOG,
* get rid of it.
* Since there might be a partial WAL segment named RECOVERYXLOG, get rid
* of it.
*/
snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG");
unlink(recoveryPath); /* ignore any error */
@ -6323,11 +6327,11 @@ StartupXLOG(void)
/*
* Set backupStartPoint if we're starting recovery from a base backup.
*
* Set backupEndPoint and use minRecoveryPoint as the backup end location
* if we're starting recovery from a base backup which was taken from
* the standby. In this case, the database system status in pg_control must
* indicate DB_IN_ARCHIVE_RECOVERY. If not, that means the backup
* is corrupted, so we cancel recovery.
* Set backupEndPoint and use minRecoveryPoint as the backup end
* location if we're starting recovery from a base backup which was
* taken from the standby. In this case, the database system status in
* pg_control must indicate DB_IN_ARCHIVE_RECOVERY. If not, that
* means the backup is corrupted, so we cancel recovery.
*/
if (haveBackupLabel)
{
@ -6383,15 +6387,15 @@ StartupXLOG(void)
/*
* We're in recovery, so unlogged relations may be trashed and must be
* reset. This should be done BEFORE allowing Hot Standby connections,
* so that read-only backends don't try to read whatever garbage is
* left over from before.
* reset. This should be done BEFORE allowing Hot Standby
* connections, so that read-only backends don't try to read whatever
* garbage is left over from before.
*/
ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP);
/*
* Likewise, delete any saved transaction snapshot files that got
* left behind by crashed backends.
* Likewise, delete any saved transaction snapshot files that got left
* behind by crashed backends.
*/
DeleteAllExportedSnapshotFiles();
@ -6489,10 +6493,11 @@ StartupXLOG(void)
/*
* Let postmaster know we've started redo now, so that it can launch
* checkpointer to perform restartpoints. We don't bother during crash
* recovery as restartpoints can only be performed during archive
* recovery. And we'd like to keep crash recovery simple, to avoid
* introducing bugs that could affect you when recovering after a crash.
* checkpointer to perform restartpoints. We don't bother during
* crash recovery as restartpoints can only be performed during
* archive recovery. And we'd like to keep crash recovery simple, to
* avoid introducing bugs that could affect you when recovering after a
* crash.
*
* After this point, we can no longer assume that we're the only
* process in addition to postmaster! Also, fsync requests are
@ -6649,8 +6654,8 @@ StartupXLOG(void)
{
/*
* We have reached the end of base backup, the point where
* the minimum recovery point in pg_control indicates.
* The data on disk is now consistent. Reset backupStartPoint
* the minimum recovery point in pg_control indicates. The
* data on disk is now consistent. Reset backupStartPoint
* and backupEndPoint.
*/
elog(DEBUG1, "end of backup reached");
@ -6863,9 +6868,9 @@ StartupXLOG(void)
oldestActiveXID = PrescanPreparedTransactions(NULL, NULL);
/*
* Update full_page_writes in shared memory and write an
* XLOG_FPW_CHANGE record before resource manager writes cleanup
* WAL records or checkpoint record is written.
* Update full_page_writes in shared memory and write an XLOG_FPW_CHANGE
* record before resource manager writes cleanup WAL records or checkpoint
* record is written.
*/
Insert->fullPageWrites = lastFullPageWrites;
LocalSetXLogInsertAllowed();
@ -6954,8 +6959,8 @@ StartupXLOG(void)
LWLockRelease(ProcArrayLock);
/*
* Start up the commit log and subtrans, if not already done for
* hot standby.
* Start up the commit log and subtrans, if not already done for hot
* standby.
*/
if (standbyState == STANDBY_DISABLED)
{
@ -7705,9 +7710,9 @@ CreateCheckPoint(int flags)
checkPoint.time = (pg_time_t) time(NULL);
/*
* For Hot Standby, derive the oldestActiveXid before we fix the redo pointer.
* This allows us to begin accumulating changes to assemble our starting
* snapshot of locks and transactions.
* For Hot Standby, derive the oldestActiveXid before we fix the redo
* pointer. This allows us to begin accumulating changes to assemble our
* starting snapshot of locks and transactions.
*/
if (!shutdown && XLogStandbyInfoActive())
checkPoint.oldestActiveXid = GetOldestActiveTransactionId();
@ -8082,10 +8087,11 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
}
/*
* Also refrain from creating a restartpoint if we have seen any references
* to non-existent pages. Restarting recovery from the restartpoint would
* not see the references, so we would lose the cross-check that the pages
* belonged to a relation that was dropped later.
* Also refrain from creating a restartpoint if we have seen any
* references to non-existent pages. Restarting recovery from the
* restartpoint would not see the references, so we would lose the
* cross-check that the pages belonged to a relation that was dropped
* later.
*/
if (XLogHaveInvalidPages())
{
@ -8098,8 +8104,8 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
}
/*
* Copy the checkpoint record to shared memory, so that checkpointer
* can work out the next time it wants to perform a restartpoint.
* Copy the checkpoint record to shared memory, so that checkpointer can
* work out the next time it wants to perform a restartpoint.
*/
SpinLockAcquire(&xlogctl->info_lck);
XLogCtl->lastCheckPointRecPtr = ReadRecPtr;
@ -8493,8 +8499,8 @@ UpdateFullPageWrites(void)
* Do nothing if full_page_writes has not been changed.
*
* It's safe to check the shared full_page_writes without the lock,
* because we assume that there is no concurrently running process
* which can update it.
* because we assume that there is no concurrently running process which
* can update it.
*/
if (fullPageWrites == Insert->fullPageWrites)
return;
@ -8505,8 +8511,8 @@ UpdateFullPageWrites(void)
* It's always safe to take full page images, even when not strictly
* required, but not the other way round. So if we're setting full_page_writes
* to true, first set it true and then write the WAL record. If we're
* setting it to false, first write the WAL record and then set the
* global flag.
* setting it to false, first write the WAL record and then set the global
* flag.
*/
if (fullPageWrites)
{
@ -8516,8 +8522,8 @@ UpdateFullPageWrites(void)
}
/*
* Write an XLOG_FPW_CHANGE record. This allows us to keep
* track of full_page_writes during archive recovery, if required.
* Write an XLOG_FPW_CHANGE record. This allows us to keep track of
* full_page_writes during archive recovery, if required.
*/
if (XLogStandbyInfoActive() && !RecoveryInProgress())
{
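Put together, the ordering protocol is (a condensed sketch; WALInsertLock was still a single LWLock at this point in time):

if (fullPageWrites)
{
    LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
    Insert->fullPageWrites = true;      /* enable before writing the record */
    LWLockRelease(WALInsertLock);
}

if (XLogStandbyInfoActive() && !RecoveryInProgress())
{
    XLogRecData rdata;

    rdata.data = (char *) (&fullPageWrites);
    rdata.len = sizeof(bool);
    rdata.buffer = InvalidBuffer;
    rdata.next = NULL;
    XLogInsert(RM_XLOG_ID, XLOG_FPW_CHANGE, &rdata);
}

if (!fullPageWrites)
{
    LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
    Insert->fullPageWrites = false;     /* disable only after logging it */
    LWLockRelease(WALInsertLock);
}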
@ -8797,9 +8803,9 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
memcpy(&fpw, XLogRecGetData(record), sizeof(bool));
/*
* Update the LSN of the last replayed XLOG_FPW_CHANGE record
* so that do_pg_start_backup() and do_pg_stop_backup() can check
* whether full_page_writes has been disabled during online backup.
* Update the LSN of the last replayed XLOG_FPW_CHANGE record so that
* do_pg_start_backup() and do_pg_stop_backup() can check whether
* full_page_writes has been disabled during online backup.
*/
if (!fpw)
{
@ -9115,8 +9121,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
errhint("WAL control functions cannot be executed during recovery.")));
/*
* During recovery, we don't need to check WAL level, because if the WAL
* level is not sufficient, it's impossible to get here during recovery.
* During recovery, we don't need to check WAL level, because if the
* WAL level is not sufficient, it's impossible to get here during recovery.
*/
if (!backup_started_in_recovery && !XLogIsNeeded())
ereport(ERROR,
@ -9207,12 +9213,13 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
* will have different checkpoint positions and hence different
* history file names, even if nothing happened in between.
*
* During recovery, establish a restartpoint if possible. We use the last
* restartpoint as the backup starting checkpoint. This means that two
* successive backup runs can have the same checkpoint positions.
* During recovery, establish a restartpoint if possible. We use
* the last restartpoint as the backup starting checkpoint. This
* means that two successive backup runs can have the same
* checkpoint positions.
*
* Since the fact that we are executing do_pg_start_backup() during
* recovery means that checkpointer is running, we can use
* Since the fact that we are executing do_pg_start_backup()
* during recovery means that checkpointer is running, we can use
* RequestCheckpoint() to establish a restartpoint.
*
* We use CHECKPOINT_IMMEDIATE only if requested by user (via
@ -9240,9 +9247,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
XLogRecPtr recptr;
/*
* Check to see if all WAL replayed during online backup (i.e.,
* since last restartpoint used as backup starting checkpoint)
* contain full-page writes.
* Check to see if all WAL replayed during online backup
* (i.e., since last restartpoint used as backup starting
* checkpoint) contain full-page writes.
*/
SpinLockAcquire(&xlogctl->info_lck);
recptr = xlogctl->lastFpwDisableRecPtr;
@ -9260,10 +9267,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
/*
* During recovery, since we don't use the end-of-backup WAL
* record and don't write the backup history file, the starting WAL
* location doesn't need to be unique. This means that two base
* backups started at the same time might use the same checkpoint
* as starting locations.
* record and don't write the backup history file, the
* starting WAL location doesn't need to be unique. This means
* that two base backups started at the same time might use
* the same checkpoint as starting locations.
*/
gotUniqueStartpoint = true;
}
@ -9443,8 +9450,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
errhint("WAL control functions cannot be executed during recovery.")));
/*
* During recovery, we don't need to check WAL level, because if the WAL
* level is not sufficient, it's impossible to get here during recovery.
* During recovery, we don't need to check WAL level, because if the
* WAL level is not sufficient, it's impossible to get here during recovery.
*/
if (!backup_started_in_recovery && !XLogIsNeeded())
ereport(ERROR,
@ -9537,9 +9544,9 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
remaining = strchr(labelfile, '\n') + 1; /* %n is not portable enough */
/*
* Parse the BACKUP FROM line. If we are taking an online backup from
* the standby, we confirm that the standby has not been promoted
* during the backup.
* Parse the BACKUP FROM line. If we are taking an online backup from the
* standby, we confirm that the standby has not been promoted during the
* backup.
*/
ptr = strstr(remaining, "BACKUP FROM:");
if (!ptr || sscanf(ptr, "BACKUP FROM: %19s\n", backupfrom) != 1)
@ -9555,30 +9562,30 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
"Try taking another online backup.")));
/*
* During recovery, we don't write an end-of-backup record. We assume
* that pg_control was backed up last and that its minimum recovery
* point can serve as the backup end location. Since we don't
* have an end-of-backup record, we use the pg_control value to check
* whether we've reached the end of backup when starting recovery from
* this backup. We have no way of checking whether pg_control really
* was backed up last, however.
* During recovery, we don't write an end-of-backup record. We assume that
* pg_control was backed up last and that its minimum recovery point can
* serve as the backup end location. Since we don't have an
* end-of-backup record, we use the pg_control value to check whether
* we've reached the end of backup when starting recovery from this
* backup. We have no way of checking whether pg_control really was backed
* up last, however.
*
* We don't force a switch to a new WAL file and wait for all the required
* files to be archived. This is okay if we use the backup to start
* the standby. But, if it's for an archive recovery, to ensure all the
* required files are available, a user should wait for them to be archived,
* or include them in the backup.
* files to be archived. This is okay if we use the backup to start the
* standby. But, if it's for an archive recovery, to ensure all the
* required files are available, a user should wait for them to be
* archived, or include them in the backup.
*
* We return the current minimum recovery point as the backup end
* location. Note that it would be bigger than the exact backup end
* location if the minimum recovery point is updated since the backup
* of pg_control. This is harmless for current uses.
* location if the minimum recovery point is updated since the backup of
* pg_control. This is harmless for current uses.
*
* XXX currently a backup history file is for informational and debug
* purposes only. It's not essential for an online backup. Furthermore,
* even if it's created, it will not be archived during recovery because
* an archiver is not invoked. So it doesn't seem worthwhile to write
* a backup history file during recovery.
* an archiver is not invoked. So it doesn't seem worthwhile to write a
* backup history file during recovery.
*/
if (backup_started_in_recovery)
{
@ -9905,10 +9912,11 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE)));
/*
* BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't
* restore from an older backup anyway, but since the information on it
* is not strictly required, don't error out if it's missing for some reason.
* BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't restore
* from an older backup anyway, but since the information on it is not
* strictly required, don't error out if it's missing for some reason.
*/
if (fscanf(lfp, "BACKUP METHOD: %19s\n", backuptype) == 1)
{
@ -10050,8 +10058,8 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg))
{
/*
* Request a restartpoint if we've replayed too much
* xlog since the last one.
* Request a restartpoint if we've replayed too much xlog since the
* last one.
*/
if (StandbyMode && bgwriterLaunched)
{

View File

@ -80,10 +80,10 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
/*
* Once recovery has reached a consistent state, the invalid-page table
* should be empty and remain so. If a reference to an invalid page is
* found after consistency is reached, PANIC immediately. This might
* seem aggressive, but it's better than letting the invalid reference
* linger in the hash table until the end of recovery and PANIC there,
* which might come only much later if this is a standby server.
* found after consistency is reached, PANIC immediately. This might seem
* aggressive, but it's better than letting the invalid reference linger
* in the hash table until the end of recovery and PANIC there, which
* might come only much later if this is a standby server.
*/
if (reachedConsistency)
{

View File

@ -189,7 +189,7 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
AclItem aclitem;
Acl *newer_acl;
aclitem. ai_grantee = lfirst_oid(j);
aclitem.ai_grantee = lfirst_oid(j);
/*
* Grant options can only be granted to individual roles, not PUBLIC.
@ -202,7 +202,7 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
errmsg("grant options can only be granted to roles")));
aclitem. ai_grantor = grantorId;
aclitem.ai_grantor = grantorId;
/*
* The asymmetry in the conditions here comes from the spec. In

View File

@ -352,7 +352,8 @@ performMultipleDeletions(const ObjectAddresses *objects,
free_object_addresses(targetObjects);
/*
* We closed depRel earlier in deleteOneObject if doing a drop concurrently
* We closed depRel earlier in deleteOneObject if doing a drop
* concurrently
*/
if ((flags & PERFORM_DELETION_CONCURRENTLY) != PERFORM_DELETION_CONCURRENTLY)
heap_close(depRel, RowExclusiveLock);
@ -517,9 +518,9 @@ findDependentObjects(const ObjectAddress *object,
* owner). If so, and if we aren't recursing from the owning object, we
* have to transform this deletion request into a deletion request of the
* owning object. (We'll eventually recurse back to this object, but the
* owning object has to be visited first so it will be deleted after.)
* The way to find out about this is to scan the pg_depend entries that
* show what this object depends on.
* owning object has to be visited first so it will be deleted after.) The
* way to find out about this is to scan the pg_depend entries that show
* what this object depends on.
*/
ScanKeyInit(&key[0],
Anum_pg_depend_classid,
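The scan setup continues in the usual two-key systable pattern (a sketch of the remainder of the call sequence; SnapshotNow was still the catalog snapshot of this era, and depRel is taken to be the open pg_depend relation):

ScanKeyInit(&key[0],
            Anum_pg_depend_classid,
            BTEqualStrategyNumber, F_OIDEQ,
            ObjectIdGetDatum(object->classId));
ScanKeyInit(&key[1],
            Anum_pg_depend_objid,
            BTEqualStrategyNumber, F_OIDEQ,
            ObjectIdGetDatum(object->objectId));

scan = systable_beginscan(depRel, DependDependerIndexId, true,
                          SnapshotNow, 2, key);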
@ -593,8 +594,8 @@ findDependentObjects(const ObjectAddress *object,
* Exception 1b: if the owning object is the extension
* currently being created/altered, it's okay to continue
* with the deletion. This allows dropping of an
* extension's objects within the extension's scripts,
* as well as corner cases such as dropping a transient
* extension's objects within the extension's scripts, as
* well as corner cases such as dropping a transient
* object created within such a script.
*/
if (creating_extension &&
@ -618,8 +619,8 @@ findDependentObjects(const ObjectAddress *object,
* it's okay to continue with the deletion. This holds when
* recursing from a whole object that includes the nominal
* other end as a component, too. Since there can be more
* than one "owning" object, we have to allow matches that
* are more than one level down in the stack.
* than one "owning" object, we have to allow matches that are
* more than one level down in the stack.
*/
if (stack_address_present_add_flags(&otherObject, 0, stack))
break;
@ -1000,6 +1001,7 @@ deleteOneObject(const ObjectAddress *object, Relation depRel, int flags)
if (object_access_hook)
{
ObjectAccessDrop drop_arg;
drop_arg.dropflags = flags;
InvokeObjectAccessHook(OAT_DROP, object->classId, object->objectId,
object->objectSubId, &drop_arg);
@ -1049,8 +1051,8 @@ deleteOneObject(const ObjectAddress *object, Relation depRel, int flags)
object->objectSubId);
/*
* Close depRel if we are doing a drop concurrently because it
* commits the transaction, so we don't want dangling references.
* Close depRel if we are doing a drop concurrently because it commits the
* transaction, so we don't want dangling references.
*/
if ((flags & PERFORM_DELETION_CONCURRENTLY) == PERFORM_DELETION_CONCURRENTLY)
heap_close(depRel, RowExclusiveLock);

View File

@ -1324,8 +1324,8 @@ index_drop(Oid indexId, bool concurrent)
CheckTableNotInUse(userIndexRelation, "DROP INDEX");
/*
* Drop Index concurrently is similar in many ways to creating an
* index concurrently, so some actions are similar to DefineIndex()
* Drop Index concurrently is similar in many ways to creating an index
* concurrently, so some actions are similar to DefineIndex()
*/
if (concurrent)
{
@ -1373,15 +1373,15 @@ index_drop(Oid indexId, bool concurrent)
* will be marked not indisvalid, so that no one else tries to either
* insert into it or use it for queries.
*
* We must commit our current transaction so that the index update becomes
* visible; then start another. Note that all the data structures we just
* built are lost in the commit. The only data we keep past here are the
* relation IDs.
* We must commit our current transaction so that the index update
* becomes visible; then start another. Note that all the data
* structures we just built are lost in the commit. The only data we
* keep past here are the relation IDs.
*
* Before committing, get a session-level lock on the table, to ensure
* that neither it nor the index can be dropped before we finish. This
* cannot block, even if someone else is waiting for access, because we
* already have the same lock within our transaction.
* cannot block, even if someone else is waiting for access, because
* we already have the same lock within our transaction.
*/
LockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
LockRelationIdForSession(&indexrelid, ShareUpdateExclusiveLock);
@ -1391,23 +1391,23 @@ index_drop(Oid indexId, bool concurrent)
StartTransactionCommand();
/*
* Now we must wait until no running transaction could have the table open
* with the old list of indexes. To do this, inquire which xacts
* currently would conflict with AccessExclusiveLock on the table -- ie,
* which ones have a lock of any kind on the table. Then wait for each of
* these xacts to commit or abort. Note we do not need to worry about
* xacts that open the table for writing after this point; they will see
* the index as invalid when they open the relation.
* Now we must wait until no running transaction could have the table
* open with the old list of indexes. To do this, inquire which xacts
* currently would conflict with AccessExclusiveLock on the table --
* ie, which ones have a lock of any kind on the table. Then wait for
* each of these xacts to commit or abort. Note we do not need to
* worry about xacts that open the table for writing after this point;
* they will see the index as invalid when they open the relation.
*
* Note: the reason we use actual lock acquisition here, rather than just
* checking the ProcArray and sleeping, is that deadlock is possible if
* one of the transactions in question is blocked trying to acquire an
* exclusive lock on our table. The lock code will detect deadlock and
* error out properly.
* Note: the reason we use actual lock acquisition here, rather than
* just checking the ProcArray and sleeping, is that deadlock is
* possible if one of the transactions in question is blocked trying
* to acquire an exclusive lock on our table. The lock code will
* detect deadlock and error out properly.
*
* Note: GetLockConflicts() never reports our own xid, hence we need not
* check for that. Also, prepared xacts are not reported, which is fine
* since they certainly aren't going to do anything more.
* Note: GetLockConflicts() never reports our own xid, hence we need
* not check for that. Also, prepared xacts are not reported, which
* is fine since they certainly aren't going to do anything more.
*/
old_lockholders = GetLockConflicts(&heaplocktag, AccessExclusiveLock);
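The conflict list is then walked, sleeping on each old lock holder in turn (a sketch; the exact wait primitive of this vintage is assumed):

while (VirtualTransactionIdIsValid(*old_lockholders))
{
    /* block until this transaction commits or aborts */
    VirtualXactLock(*old_lockholders, true);
    old_lockholders++;
}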

View File

@ -247,11 +247,11 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
}
/*
* DDL operations can change the results of a name lookup. Since all
* such operations will generate invalidation messages, we keep track
* of whether any such messages show up while we're performing the
* operation, and retry until either (1) no more invalidation messages
* show up or (2) the answer doesn't change.
* DDL operations can change the results of a name lookup. Since all such
* operations will generate invalidation messages, we keep track of
* whether any such messages show up while we're performing the operation,
* and retry until either (1) no more invalidation messages show up or (2)
* the answer doesn't change.
*
* But if lockmode = NoLock, then we assume that either the caller is OK
* with the answer changing under them, or that they already hold some
@ -259,8 +259,8 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
* checking for invalidation messages. Also, if the requested lock is
* already held, LockRelationOid will not AcceptInvalidationMessages,
* so we may fail to notice a change. We could protect against that case
* by calling AcceptInvalidationMessages() before beginning this loop,
* but that would add a significant amount of overhead, so for now we don't.
* by calling AcceptInvalidationMessages() before beginning this loop, but
* that would add a significant amount of overhead, so for now we don't.
*/
for (;;)
{
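A skeleton of the loop body (a simplified sketch; the hypothetical do_name_lookup() stands in for the schema-path logic that follows):

uint64      inval_count;
Oid         relId;
Oid         oldRelId = InvalidOid;
bool        retry = false;

for (;;)
{
    /* remember how much invalidation traffic we had already seen */
    inval_count = SharedInvalidMessageCounter;

    relId = do_name_lookup(relation);   /* hypothetical helper */

    if (lockmode == NoLock)
        break;                  /* caller accepts a possibly stale answer */

    if (retry)
    {
        if (relId == oldRelId)
            break;              /* answer didn't change; lock already held */
        if (OidIsValid(oldRelId))
            UnlockRelationOid(oldRelId, lockmode);  /* locked the wrong rel */
    }

    if (OidIsValid(relId))
        LockRelationOid(relId, lockmode);   /* may absorb inval messages */

    if (inval_count == SharedInvalidMessageCounter)
        break;                  /* nothing changed while we looked it up */

    retry = true;
    oldRelId = relId;
}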
@ -288,6 +288,7 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
if (relation->schemaname)
{
Oid namespaceId;
namespaceId = LookupExplicitNamespace(relation->schemaname);
if (namespaceId != myTempNamespace)
ereport(ERROR,
@ -315,12 +316,12 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
/*
* Invoke caller-supplied callback, if any.
*
* This callback is a good place to check permissions: we haven't taken
* the table lock yet (and it's really best to check permissions before
* locking anything!), but we've gotten far enough to know what OID we
* think we should lock. Of course, concurrent DDL might change things
* while we're waiting for the lock, but in that case the callback will
* be invoked again for the new OID.
* This callback is a good place to check permissions: we haven't
* taken the table lock yet (and it's really best to check permissions
* before locking anything!), but we've gotten far enough to know what
* OID we think we should lock. Of course, concurrent DDL might
* change things while we're waiting for the lock, but in that case
* the callback will be invoked again for the new OID.
*/
if (callback)
callback(relation, relId, oldRelId, callback_arg);
@ -328,21 +329,21 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
/*
* If no lock requested, we assume the caller knows what they're
* doing. They should have already acquired a heavyweight lock on
* this relation earlier in the processing of this same statement,
* so it wouldn't be appropriate to AcceptInvalidationMessages()
* here, as that might pull the rug out from under them.
* this relation earlier in the processing of this same statement, so
* it wouldn't be appropriate to AcceptInvalidationMessages() here, as
* that might pull the rug out from under them.
*/
if (lockmode == NoLock)
break;
/*
* If, upon retry, we get back the same OID we did last time, then
* the invalidation messages we processed did not change the final
* answer. So we're done.
* If, upon retry, we get back the same OID we did last time, then the
* invalidation messages we processed did not change the final answer.
* So we're done.
*
* If we got a different OID, we've locked the relation that used to
* have this name rather than the one that does now. So release
* the lock.
* have this name rather than the one that does now. So release the
* lock.
*/
if (retry)
{
@ -384,8 +385,8 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
break;
/*
* Something may have changed. Let's repeat the name lookup, to
* make sure this name still references the same relation it did
* Something may have changed. Let's repeat the name lookup, to make
* sure this name still references the same relation it did
* previously.
*/
retry = true;
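The retry protocol spelled out in these comments condenses to a small loop. A standalone sketch with invented names (the real function also deals with permissions callbacks, temp schemas, and missing_ok handling): remember the invalidation counter, look up, lock, and stop only when no invalidation messages arrived or the answer stopped changing.

#include <stdio.h>

static unsigned inval_counter = 0;	/* bumped whenever "DDL" commits */
static int	current_oid = 1111;	/* what the name currently resolves to */

static int
lookup(void)
{
	return current_oid;
}

static void
lock_oid(int oid)
{
	printf("lock %d\n", oid);
}

static void
unlock_oid(int oid)
{
	printf("unlock %d\n", oid);
}

static int
lookup_and_lock(void)
{
	int			old_oid = 0;	/* stand-in for InvalidOid */

	for (;;)
	{
		unsigned	seen = inval_counter;
		int			oid = lookup();

		if (oid != old_oid)
		{
			if (old_oid != 0)
				unlock_oid(old_oid);	/* we had locked the wrong relation */
			lock_oid(oid);
		}

		/* Done if no invalidation arrived or the answer didn't change. */
		if (seen == inval_counter || oid == old_oid)
			return oid;

		old_oid = oid;			/* something changed: look up again */
	}
}

int
main(void)
{
	printf("resolved and locked OID %d\n", lookup_and_lock());
	return 0;
}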
@ -550,8 +551,8 @@ RangeVarGetAndCheckCreationNamespace(RangeVar *relation,
relid = InvalidOid;
/*
* In bootstrap processing mode, we don't bother with permissions
* or locking. Permissions might not be working yet, and locking is
* In bootstrap processing mode, we don't bother with permissions or
* locking. Permissions might not be working yet, and locking is
* unnecessary.
*/
if (IsBootstrapProcessingMode())

View File

@ -286,9 +286,9 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
for (;;)
{
/*
* Remember this value, so that, after looking up the object name
* and locking it, we can check whether any invalidation messages
* have been processed that might require a do-over.
* Remember this value, so that, after looking up the object name and
* locking it, we can check whether any invalidation messages have
* been processed that might require a do-over.
*/
inval_count = SharedInvalidMessageCounter;
@ -475,8 +475,8 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
* At this point, we've resolved the name to an OID and locked the
* corresponding database object. However, it's possible that by the
* time we acquire the lock on the object, concurrent DDL has modified
* the database in such a way that the name we originally looked up
* no longer resolves to that OID.
* the database in such a way that the name we originally looked up no
* longer resolves to that OID.
*
* We can be certain that this isn't an issue if (a) no shared
* invalidation messages have been processed or (b) we've locked a
@ -488,8 +488,8 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
* the relation, which is enough to freeze out any concurrent DDL.
*
* In all other cases, however, it's possible that the name we looked
* up no longer refers to the object we locked, so we retry the
* lookup and see whether we get the same answer.
* up no longer refers to the object we locked, so we retry the lookup
* and see whether we get the same answer.
*/
if (inval_count == SharedInvalidMessageCounter || relation != NULL)
break;
@ -721,8 +721,8 @@ get_object_address_relobject(ObjectType objtype, List *objname,
address.objectSubId = 0;
/*
* Caller is expecting to get back the relation, even though we
* didn't end up using it to find the rule.
* Caller is expecting to get back the relation, even though we didn't
* end up using it to find the rule.
*/
if (OidIsValid(address.objectId))
relation = heap_open(reloid, AccessShareLock);
@ -837,6 +837,7 @@ get_object_address_type(ObjectType objtype,
ObjectAddress address;
TypeName *typename;
Type tup;
typename = makeTypeNameFromNameList(objname);
address.classId = TypeRelationId;

View File

@ -500,8 +500,8 @@ smgr_redo(XLogRecPtr lsn, XLogRecord *record)
/*
* Forcibly create relation if it doesn't exist (which suggests that
* it was dropped somewhere later in the WAL sequence). As in
* XLogReadBuffer, we prefer to recreate the rel and replay the log
* as best we can until the drop is seen.
* XLogReadBuffer, we prefer to recreate the rel and replay the log as
* best we can until the drop is seen.
*/
smgrcreate(reln, MAIN_FORKNUM, true);

View File

@ -205,8 +205,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
}
/*
 * Check that it's a plain table or foreign table; we used to do this
 * in get_rel_oids() but it seems safer to check after we've locked the
 * Check that it's a plain table or foreign table; we used to do this in
 * get_rel_oids() but it seems safer to check after we've locked the
* relation.
*/
if (onerel->rd_rel->relkind == RELKIND_RELATION)
@ -464,8 +464,8 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
/*
* Determine how many rows we need to sample, using the worst case from
* all analyzable columns. We use a lower bound of 100 rows to avoid
* possible overflow in Vitter's algorithm. (Note: that will also be
* the target in the corner case where there are no analyzable columns.)
* possible overflow in Vitter's algorithm. (Note: that will also be the
* target in the corner case where there are no analyzable columns.)
*/
targrows = 100;
for (i = 0; i < attr_cnt; i++)
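The sampling these comments refer to is reservoir sampling. A self-contained sketch of the simple variant (Vitter's Algorithm R; the production code uses his faster Algorithm Z and real randomness rather than rand()), with the same lower bound of 100 slots:

#include <stdio.h>
#include <stdlib.h>

#define TARGROWS 100			/* sample size; cf. the lower bound above */

int
main(void)
{
	int			reservoir[TARGROWS];
	long		seen = 0;

	for (long row = 0; row < 1000000; row++)	/* the "table scan" */
	{
		if (seen < TARGROWS)
			reservoir[seen] = (int) row;	/* fill the reservoir first */
		else
		{
			long		k = rand() % (seen + 1);	/* toy randomness, 0..seen */

			if (k < TARGROWS)
				reservoir[k] = (int) row;	/* replace a slot at random */
		}
		seen++;
	}
	printf("kept %d of %ld rows; slot 0 holds row %d\n",
		   TARGROWS, seen, reservoir[0]);
	return 0;
}

Every row ends up in the sample with probability TARGROWS/seen, which is exactly the uniformity the statistics code needs.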

View File

@ -594,10 +594,10 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace)
OldHeapDesc = RelationGetDescr(OldHeap);
/*
* Note that the NewHeap will not
* receive any of the defaults or constraints associated with the OldHeap;
* we don't need 'em, and there's no reason to spend cycles inserting them
* into the catalogs only to delete them.
* Note that the NewHeap will not receive any of the defaults or
* constraints associated with the OldHeap; we don't need 'em, and there's
* no reason to spend cycles inserting them into the catalogs only to
* delete them.
*/
/*

View File

@ -1861,6 +1861,7 @@ CopyFrom(CopyState cstate)
uint64 processed = 0;
bool useHeapMultiInsert;
int nBufferedTuples = 0;
#define MAX_BUFFERED_TUPLES 1000
HeapTuple *bufferedTuples = NULL; /* initialize to silence warning */
Size bufferedTuplesSize = 0;
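MAX_BUFFERED_TUPLES above caps a plain batching scheme: rows accumulate in an array and are pushed down in groups, so the per-call overhead is paid once per batch instead of once per row. A generic standalone sketch (flush_batch is an invented stand-in, not the COPY code):

#include <stdio.h>

#define MAX_BUFFERED 1000

static int	buffered[MAX_BUFFERED];
static int	nbuffered = 0;

static void
flush_batch(void)
{
	if (nbuffered == 0)
		return;
	printf("flushing %d rows in one call\n", nbuffered);
	nbuffered = 0;
}

static void
insert_row(int row)
{
	buffered[nbuffered++] = row;
	if (nbuffered >= MAX_BUFFERED)
		flush_batch();			/* amortize per-call costs over the batch */
}

int
main(void)
{
	for (int i = 0; i < 2500; i++)
		insert_row(i);
	flush_batch();				/* don't lose the final partial batch */
	return 0;
}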
@ -2162,8 +2163,8 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
int i;
/*
* heap_multi_insert leaks memory, so switch to short-lived memory
* context before calling it.
* heap_multi_insert leaks memory, so switch to short-lived memory context
* before calling it.
*/
oldcontext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
heap_multi_insert(cstate->rel,
@ -2175,8 +2176,8 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
MemoryContextSwitchTo(oldcontext);
/*
* If there are any indexes, update them for all the inserted tuples,
* and run AFTER ROW INSERT triggers.
* If there are any indexes, update them for all the inserted tuples, and
* run AFTER ROW INSERT triggers.
*/
if (resultRelInfo->ri_NumIndices > 0)
{
@ -2194,6 +2195,7 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
list_free(recheckIndexes);
}
}
/*
 * There are no indexes, but see if we need to run AFTER ROW INSERT triggers
* anyway.
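The per-tuple memory context used by CopyFromInsertBatch above is a reset-wholesale arena: code may allocate into it without freeing, because everything is reclaimed in one cheap reset per row. A toy standalone equivalent built on a bump allocator (invented names; PostgreSQL's memory contexts are far more general):

#include <stdio.h>

static char scratch[64 * 1024];		/* the "short-lived context" */
static unsigned long scratch_used = 0;

static void *
scratch_alloc(unsigned long n)
{
	void	   *p = scratch + scratch_used;

	scratch_used += (n + 7) & ~7UL;	/* keep 8-byte alignment; no overflow
					 * checks in this toy */
	return p;
}

static void
scratch_reset(void)
{
	scratch_used = 0;
}

/* Stand-in for a routine that allocates and never frees. */
static void
leaky_work(int row)
{
	char	   *tmp = scratch_alloc(128);

	snprintf(tmp, 128, "row %d", row);
}

int
main(void)
{
	for (int row = 0; row < 1000000; row++)
	{
		leaky_work(row);		/* leaks into the arena... */
		scratch_reset();		/* ...which is discarded wholesale */
	}
	puts("done without unbounded memory growth");
	return 0;
}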

View File

@ -115,10 +115,10 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
/*
* Use a snapshot with an updated command ID to ensure this query sees
* results of any previously executed queries. (This could only matter
* if the planner executed an allegedly-stable function that changed
* the database contents, but let's do it anyway to be parallel to the
* EXPLAIN code path.)
* results of any previously executed queries. (This could only matter if
* the planner executed an allegedly-stable function that changed the
* database contents, but let's do it anyway to be parallel to the EXPLAIN
* code path.)
*/
PushCopiedSnapshot(GetActiveSnapshot());
UpdateActiveSnapshotCommandId();
@ -237,8 +237,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
create->if_not_exists = false;
/*
* Build column definitions using "pre-cooked" type and collation info.
* If a column name list was specified in CREATE TABLE AS, override the
* Build column definitions using "pre-cooked" type and collation info. If
* a column name list was specified in CREATE TABLE AS, override the
* column names derived from the query. (Too few column names are OK, too
* many are not.)
*/
@ -280,9 +280,9 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
/*
* It's possible that the column is of a collatable type but the
* collation could not be resolved, so double-check. (We must
* check this here because DefineRelation would adopt the type's
* default collation rather than complaining.)
* collation could not be resolved, so double-check. (We must check
* this here because DefineRelation would adopt the type's default
* collation rather than complaining.)
*/
if (!OidIsValid(col->collOid) &&
type_is_collatable(coltype->typeOid))

View File

@ -785,6 +785,7 @@ dropdb(const char *dbname, bool missing_ok)
if (object_access_hook)
{
ObjectAccessDrop drop_arg;
memset(&drop_arg, 0, sizeof(ObjectAccessDrop));
InvokeObjectAccessHook(OAT_DROP,
DatabaseRelationId, db_id, 0, &drop_arg);
@ -831,8 +832,7 @@ dropdb(const char *dbname, bool missing_ok)
ReleaseSysCache(tup);
/*
* Delete any comments or security labels associated with
* the database.
* Delete any comments or security labels associated with the database.
*/
DeleteSharedComments(db_id, DatabaseRelationId);
DeleteSharedSecurityLabel(db_id, DatabaseRelationId);
@ -860,18 +860,18 @@ dropdb(const char *dbname, bool missing_ok)
pgstat_drop_database(db_id);
/*
* Tell checkpointer to forget any pending fsync and unlink requests for files
* in the database; else the fsyncs will fail at next checkpoint, or
* Tell checkpointer to forget any pending fsync and unlink requests for
* files in the database; else the fsyncs will fail at next checkpoint, or
* worse, it will delete files that belong to a newly created database
* with the same OID.
*/
ForgetDatabaseFsyncRequests(db_id);
/*
* Force a checkpoint to make sure the checkpointer has received the message
* sent by ForgetDatabaseFsyncRequests. On Windows, this also ensures that
* background procs don't hold any open files, which would cause rmdir() to
* fail.
* Force a checkpoint to make sure the checkpointer has received the
* message sent by ForgetDatabaseFsyncRequests. On Windows, this also
* ensures that background procs don't hold any open files, which would
* cause rmdir() to fail.
*/
RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT);

View File

@ -231,7 +231,7 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs)
args = strVal(linitial(objargs));
break;
default:
elog(ERROR, "unexpected object type (%d)", (int)objtype);
elog(ERROR, "unexpected object type (%d)", (int) objtype);
break;
}

View File

@ -340,9 +340,9 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es,
if (IsA(utilityStmt, CreateTableAsStmt))
{
/*
* We have to rewrite the contained SELECT and then pass it back
* to ExplainOneQuery. It's probably not really necessary to copy
* the contained parsetree another time, but let's be safe.
* We have to rewrite the contained SELECT and then pass it back to
* ExplainOneQuery. It's probably not really necessary to copy the
* contained parsetree another time, but let's be safe.
*/
CreateTableAsStmt *ctas = (CreateTableAsStmt *) utilityStmt;
List *rewritten;

View File

@ -890,9 +890,9 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
ReleaseSysCache(languageTuple);
/*
 * Only a superuser is allowed to create leakproof functions, because
 * such functions could let unprivileged users see invisible tuples
 * that views used for row-level security would otherwise filter out.
 * Only a superuser is allowed to create leakproof functions, because such
 * functions could let unprivileged users see invisible tuples that views
 * used for row-level security would otherwise filter out.
*/
if (isLeakProof && !superuser())
ereport(ERROR,

View File

@ -134,6 +134,7 @@ CheckIndexCompatible(Oid oldId,
/* Caller should already have the relation locked in some way. */
relationId = RangeVarGetRelid(heapRelation, NoLock, false);
/*
* We can pretend isconstraint = false unconditionally. It only serves to
* decide the text of an error message that should never happen for us.
@ -157,10 +158,10 @@ CheckIndexCompatible(Oid oldId,
ReleaseSysCache(tuple);
/*
* Compute the operator classes, collations, and exclusion operators
* for the new index, so we can test whether it's compatible with the
* existing one. Note that ComputeIndexAttrs might fail here, but that's
* OK: DefineIndex would have called this function with the same arguments
* Compute the operator classes, collations, and exclusion operators for
* the new index, so we can test whether it's compatible with the existing
* one. Note that ComputeIndexAttrs might fail here, but that's OK:
* DefineIndex would have called this function with the same arguments
* later on, and it would have failed then anyway.
*/
indexInfo = makeNode(IndexInfo);
@ -232,7 +233,8 @@ CheckIndexCompatible(Oid oldId,
/* Any change in exclusion operator selections breaks compatibility. */
if (ret && indexInfo->ii_ExclusionOps != NULL)
{
Oid *old_operators, *old_procs;
Oid *old_operators,
*old_procs;
uint16 *old_strats;
RelationGetExclusionInfo(irel, &old_operators, &old_procs, &old_strats);
@ -1778,9 +1780,9 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation,
return;
/*
* If the relation does exist, check whether it's an index. But note
* that the relation might have been dropped between the time we did the
* name lookup and now. In that case, there's nothing to do.
* If the relation does exist, check whether it's an index. But note that
* the relation might have been dropped between the time we did the name
* lookup and now. In that case, there's nothing to do.
*/
relkind = get_rel_relkind(relId);
if (!relkind)
@ -1798,9 +1800,9 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation,
if (relId != oldRelId)
{
/*
 * Lock level here should match reindex_index() heap lock.
 * If the OID isn't valid, it means the index was concurrently dropped,
 * which is not a problem for us; just return normally.
 * Lock level here should match reindex_index() heap lock. If the OID
 * isn't valid, it means the index was concurrently dropped, which is
 * not a problem for us; just return normally.
*/
*heapOid = IndexGetRelation(relId, true);
if (OidIsValid(*heapOid))

View File

@ -40,9 +40,9 @@ LockTableCommand(LockStmt *lockstmt)
/*
 * During recovery we only accept these variations: LOCK TABLE foo IN
 * ACCESS SHARE MODE, LOCK TABLE foo IN ROW SHARE MODE, and LOCK TABLE foo
 * IN ROW EXCLUSIVE MODE. This test must match the restrictions defined
 * in LockAcquire()
 * ACCESS SHARE MODE, LOCK TABLE foo IN ROW SHARE MODE, and LOCK TABLE foo
 * IN ROW EXCLUSIVE MODE. This test must match the restrictions defined in
 * LockAcquire()
*/
if (lockstmt->mode > RowExclusiveLock)
PreventCommandDuringRecovery("LOCK TABLE");
@ -74,7 +74,7 @@ static void
RangeVarCallbackForLockTable(const RangeVar *rv, Oid relid, Oid oldrelid,
void *arg)
{
LOCKMODE lockmode = * (LOCKMODE *) arg;
LOCKMODE lockmode = *(LOCKMODE *) arg;
char relkind;
AclResult aclresult;
@ -82,7 +82,8 @@ RangeVarCallbackForLockTable(const RangeVar *rv, Oid relid, Oid oldrelid,
return; /* doesn't exist, so no permissions check */
relkind = get_rel_relkind(relid);
if (!relkind)
return; /* woops, concurrently dropped; no permissions check */
return; /* woops, concurrently dropped; no permissions
* check */
/* Currently, we only allow plain tables to be locked */
if (relkind != RELKIND_RELATION)
@ -122,6 +123,7 @@ LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait)
if (aclresult != ACLCHECK_OK)
{
char *relname = get_rel_name(childreloid);
if (!relname)
continue; /* child concurrently dropped, just skip it */
aclcheck_error(aclresult, ACL_KIND_CLASS, relname);
@ -134,6 +136,7 @@ LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait)
{
/* try to throw error by name; relation could be deleted... */
char *relname = get_rel_name(childreloid);
if (!relname)
continue; /* child concurrently dropped, just skip it */
ereport(ERROR,
@ -143,8 +146,8 @@ LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait)
}
/*
* Even if we got the lock, child might have been concurrently dropped.
* If so, we can skip it.
* Even if we got the lock, child might have been concurrently
* dropped. If so, we can skip it.
*/
if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(childreloid)))
{

View File

@ -244,8 +244,8 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause,
* the OID-determining eflags (PortalStart won't handle them in such a
* case, and for that matter it's not clear the executor will either).
*
* For CREATE TABLE ... AS EXECUTE, we also have to ensure that the
* proper eflags and fetch count are passed to PortalStart/PortalRun.
* For CREATE TABLE ... AS EXECUTE, we also have to ensure that the proper
* eflags and fetch count are passed to PortalStart/PortalRun.
*/
if (intoClause)
{

View File

@ -514,12 +514,12 @@ nextval(PG_FUNCTION_ARGS)
sequence = makeRangeVarFromNameList(textToQualifiedNameList(seqin));
/*
* XXX: This is not safe in the presence of concurrent DDL, but
* acquiring a lock here is more expensive than letting nextval_internal
* do it, since the latter maintains a cache that keeps us from hitting
* the lock manager more than once per transaction. It's not clear
* whether the performance penalty is material in practice, but for now,
* we do it this way.
* XXX: This is not safe in the presence of concurrent DDL, but acquiring
* a lock here is more expensive than letting nextval_internal do it,
* since the latter maintains a cache that keeps us from hitting the lock
* manager more than once per transaction. It's not clear whether the
* performance penalty is material in practice, but for now, we do it this
* way.
*/
relid = RangeVarGetRelid(sequence, NoLock, false);
@ -1543,9 +1543,9 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record)
* is also used for updating sequences, it's possible that a hot-standby
* backend is examining the page concurrently; so we mustn't transiently
* trash the buffer. The solution is to build the correct new page
* contents in local workspace and then memcpy into the buffer. Then
* only bytes that are supposed to change will change, even transiently.
* We must palloc the local page for alignment reasons.
* contents in local workspace and then memcpy into the buffer. Then only
* bytes that are supposed to change will change, even transiently. We
* must palloc the local page for alignment reasons.
*/
localpage = (Page) palloc(BufferGetPageSize(buffer));
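A standalone illustration of the build-locally-then-memcpy trick the comment describes (sizes and offsets invented): because the final copy writes a fully built image whose untouched bytes equal the old contents, a concurrent reader never sees transient garbage in bytes that were not supposed to change, even though memcpy itself is not atomic.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGESZ 8192

int
main(void)
{
	char	   *shared = malloc(PAGESZ);	/* stands in for the shared buffer */
	char	   *local = malloc(PAGESZ);	/* heap allocation => suitably aligned */

	memset(shared, 'A', PAGESZ);
	memcpy(local, shared, PAGESZ);		/* start from the current contents */
	memset(local + 100, 'B', 8);		/* apply the change privately */
	memcpy(shared, local, PAGESZ);		/* publish: only intended bytes differ */

	printf("byte 100 = %c, byte 0 = %c\n", shared[100], shared[0]);
	free(local);
	free(shared);
	return 0;
}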

View File

@ -914,9 +914,9 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid,
/*
* In DROP INDEX, attempt to acquire lock on the parent table before
* locking the index. index_drop() will need this anyway, and since
* regular queries lock tables before their indexes, we risk deadlock
* if we do it the other way around. No error if we don't find a
 * pg_index entry, though --- the relation may have been dropped.
 * regular queries lock tables before their indexes, we risk deadlock if
 * we do it the other way around. No error if we don't find a pg_index
 * entry, though --- the relation may have been dropped.
*/
if (relkind == RELKIND_INDEX && relOid != oldRelOid)
{
@ -2391,7 +2391,11 @@ rename_constraint_internal(Oid myrelid,
else
{
targetrelation = relation_open(myrelid, AccessExclusiveLock);
/* don't tell it whether we're recursing; we allow changing typed tables here */
/*
* don't tell it whether we're recursing; we allow changing typed
* tables here
*/
renameatt_check(myrelid, RelationGetForm(targetrelation), false);
constraintOid = get_relation_constraint_oid(myrelid, oldconname, false);
@ -2492,7 +2496,7 @@ RenameConstraint(RenameStmt *stmt)
stmt->newname,
stmt->relation ? interpretInhOption(stmt->relation->inhOpt) : false, /* recursive? */
false, /* recursing? */
0 /* expected inhcount */);
0 /* expected inhcount */ );
}
/*
@ -2507,8 +2511,8 @@ RenameRelation(RenameStmt *stmt)
* Grab an exclusive lock on the target table, index, sequence or view,
* which we will NOT release until end of transaction.
*
* Lock level used here should match RenameRelationInternal, to avoid
* lock escalation.
* Lock level used here should match RenameRelationInternal, to avoid lock
* escalation.
*/
relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
stmt->missing_ok, false,
@ -2773,22 +2777,22 @@ LOCKMODE
AlterTableGetLockLevel(List *cmds)
{
/*
 * Late in the 9.1 dev cycle a number of issues were uncovered with access
 * to catalog relations, leading to the decision to re-enforce all DDL
 * at AccessExclusiveLock level by default.
 * Late in the 9.1 dev cycle a number of issues were uncovered with access to
 * catalog relations, leading to the decision to re-enforce all DDL at
 * AccessExclusiveLock level by default.
*
* The issues are that there is a pervasive assumption in the code that
* the catalogs will not be read unless an AccessExclusiveLock is held.
* If that rule is relaxed, we must protect against a number of potential
* the catalogs will not be read unless an AccessExclusiveLock is held. If
* that rule is relaxed, we must protect against a number of potential
* effects - infrequent, but proven possible with test cases where
* multiple DDL operations occur in a stream against frequently accessed
* tables.
*
* 1. Catalog tables are read using SnapshotNow, which has a race bug
* that allows a scan to return no valid rows even when one is present
* in the case of a commit of a concurrent update of the catalog table.
* SnapshotNow also ignores transactions in progress, so takes the
* latest committed version without waiting for the latest changes.
* 1. Catalog tables are read using SnapshotNow, which has a race bug that
* allows a scan to return no valid rows even when one is present in the
* case of a commit of a concurrent update of the catalog table.
* SnapshotNow also ignores transactions in progress, so takes the latest
* committed version without waiting for the latest changes.
*
* 2. Relcache needs to be internally consistent, so unless we lock the
* definition during reads we have no way to guarantee that.
@ -4999,8 +5003,8 @@ ATExecColumnDefault(Relation rel, const char *colName,
* safety, but at present we do not expect anything to depend on the
* default.
*
* We treat removing the existing default as an internal operation when
* it is preparatory to adding a new default, but as a user-initiated
* We treat removing the existing default as an internal operation when it
* is preparatory to adding a new default, but as a user-initiated
* operation when the user asked for a drop.
*/
RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, false,
@ -5514,6 +5518,7 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
if (OidIsValid(stmt->oldNode))
{
Relation irel = index_open(new_index, NoLock);
RelationPreserveStorage(irel->rd_node, true);
index_close(irel, NoLock);
}
@ -6067,8 +6072,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
* operand, revalidate the constraint. For this evaluation, a
* binary coercion cast is equivalent to no cast at all. While
* type implementors should design implicit casts with an eye
* toward consistency of operations like equality, we cannot assume
* here that they have done so.
* toward consistency of operations like equality, we cannot
* assume here that they have done so.
*
* A function with a polymorphic argument could change behavior
* arbitrarily in response to get_fn_expr_argtype(). Therefore,
@ -6091,8 +6096,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
*
* We need not directly consider the PK type. It's necessarily
* binary coercible to the opcintype of the unique index column,
* and ri_triggers.c will only deal with PK datums in terms of that
* opcintype. Changing the opcintype also changes pfeqop.
* and ri_triggers.c will only deal with PK datums in terms of
* that opcintype. Changing the opcintype also changes pfeqop.
*/
old_check_ok = (new_pathtype == old_pathtype &&
new_castfunc == old_castfunc &&
@ -6144,11 +6149,11 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
createForeignKeyTriggers(rel, fkconstraint, constrOid, indexOid);
/*
* Tell Phase 3 to check that the constraint is satisfied by existing rows.
* We can skip this during table creation, when requested explicitly by
* specifying NOT VALID in an ADD FOREIGN KEY command, and when we're
* recreating a constraint following a SET DATA TYPE operation that did not
* impugn its validity.
* Tell Phase 3 to check that the constraint is satisfied by existing
* rows. We can skip this during table creation, when requested explicitly
* by specifying NOT VALID in an ADD FOREIGN KEY command, and when we're
* recreating a constraint following a SET DATA TYPE operation that did
* not impugn its validity.
*/
if (!old_check_ok && !fkconstraint->skip_validation)
{
@ -6236,12 +6241,12 @@ ATExecValidateConstraint(Relation rel, char *constrName, bool recurse,
Relation refrel;
/*
* Triggers are already in place on both tables, so a concurrent write
* that alters the result here is not possible. Normally we can run a
* query here to do the validation, which would only require
* AccessShareLock. In some cases, it is possible that we might need
* to fire triggers to perform the check, so we take a lock at
* RowShareLock level just in case.
* Triggers are already in place on both tables, so a concurrent
* write that alters the result here is not possible. Normally we
* can run a query here to do the validation, which would only
* require AccessShareLock. In some cases, it is possible that we
* might need to fire triggers to perform the check, so we take a
* lock at RowShareLock level just in case.
*/
refrel = heap_open(con->confrelid, RowShareLock);
@ -6679,10 +6684,11 @@ validateCheckConstraint(Relation rel, HeapTuple constrtup)
constrForm = (Form_pg_constraint) GETSTRUCT(constrtup);
estate = CreateExecutorState();
/*
* XXX this tuple doesn't really come from a syscache, but this doesn't
* matter to SysCacheGetAttr, because it only wants to be able to fetch the
* tupdesc
* matter to SysCacheGetAttr, because it only wants to be able to fetch
* the tupdesc
*/
val = SysCacheGetAttr(CONSTROID, constrtup, Anum_pg_constraint_conbin,
&isnull);
@ -7140,8 +7146,7 @@ ATExecDropConstraint(Relation rel, const char *constrName,
{
/*
* If the child constraint has other definition sources, just
* decrement its inheritance count; if not, recurse to delete
* it.
* decrement its inheritance count; if not, recurse to delete it.
*/
if (con->coninhcount == 1 && !con->conislocal)
{
@ -7164,9 +7169,9 @@ ATExecDropConstraint(Relation rel, const char *constrName,
else
{
/*
* If we were told to drop ONLY in this table (no recursion),
* we need to mark the inheritors' constraints as locally
* defined rather than inherited.
* If we were told to drop ONLY in this table (no recursion), we
* need to mark the inheritors' constraints as locally defined
* rather than inherited.
*/
con->coninhcount--;
con->conislocal = true;
@ -8061,6 +8066,7 @@ TryReuseIndex(Oid oldId, IndexStmt *stmt)
stmt->excludeOpNames))
{
Relation irel = index_open(oldId, NoLock);
stmt->oldNode = irel->rd_node.relNode;
index_close(irel, NoLock);
}
@ -10267,9 +10273,9 @@ RangeVarCallbackOwnsTable(const RangeVar *relation,
return;
/*
* If the relation does exist, check whether it's an index. But note
* that the relation might have been dropped between the time we did the
* name lookup and now. In that case, there's nothing to do.
* If the relation does exist, check whether it's an index. But note that
* the relation might have been dropped between the time we did the name
* lookup and now. In that case, there's nothing to do.
*/
relkind = get_rel_relkind(relId);
if (!relkind)
@ -10333,6 +10339,7 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
}
else if (IsA(stmt, AlterObjectSchemaStmt))
reltype = ((AlterObjectSchemaStmt *) stmt)->objectType;
else if (IsA(stmt, AlterTableStmt))
reltype = ((AlterTableStmt *) stmt)->relkind;
else
@ -10342,11 +10349,11 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
}
/*
* For compatibility with prior releases, we allow ALTER TABLE to be
* used with most other types of relations (but not composite types).
* We allow similar flexibility for ALTER INDEX in the case of RENAME,
* but not otherwise. Otherwise, the user must select the correct form
* of the command for the relation at issue.
* For compatibility with prior releases, we allow ALTER TABLE to be used
* with most other types of relations (but not composite types). We allow
* similar flexibility for ALTER INDEX in the case of RENAME, but not
* otherwise. Otherwise, the user must select the correct form of the
* command for the relation at issue.
*/
if (reltype == OBJECT_SEQUENCE && relkind != RELKIND_SEQUENCE)
ereport(ERROR,
@ -10391,10 +10398,10 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
errhint("Use ALTER FOREIGN TABLE instead.")));
/*
* Don't allow ALTER TABLE .. SET SCHEMA on relations that can't be
* moved to a different schema, such as indexes and TOAST tables.
* Don't allow ALTER TABLE .. SET SCHEMA on relations that can't be moved
* to a different schema, such as indexes and TOAST tables.
*/
if (IsA(stmt, AlterObjectSchemaStmt) && relkind != RELKIND_RELATION
if (IsA(stmt, AlterObjectSchemaStmt) &&relkind != RELKIND_RELATION
&& relkind != RELKIND_VIEW && relkind != RELKIND_SEQUENCE
&& relkind != RELKIND_FOREIGN_TABLE)
ereport(ERROR,

View File

@ -438,6 +438,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
if (object_access_hook)
{
ObjectAccessDrop drop_arg;
memset(&drop_arg, 0, sizeof(ObjectAccessDrop));
InvokeObjectAccessHook(OAT_DROP, TableSpaceRelationId,
tablespaceoid, 0, &drop_arg);
@ -1204,9 +1205,9 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source)
* DATABASE SET or ALTER USER SET command. pg_dumpall dumps all
* roles before tablespaces, so if we're restoring a pg_dumpall
* script the tablespace might not yet exist, but will be created
* later. Because of that, issue a NOTICE if source == PGC_S_TEST,
* but accept the value anyway. Otherwise, silently ignore any
* bad list elements.
* later. Because of that, issue a NOTICE if source ==
* PGC_S_TEST, but accept the value anyway. Otherwise, silently
* ignore any bad list elements.
*/
curoid = get_tablespace_oid(curname, source <= PGC_S_TEST);
if (curoid == InvalidOid)
@ -1493,10 +1494,10 @@ tblspc_redo(XLogRecPtr lsn, XLogRecord *record)
* files then do conflict processing and try again, if currently
* enabled.
*
* Other possible reasons for failure include bollixed file permissions
* on a standby server when they were okay on the primary, etc etc.
* There's not much we can do about that, so just remove what we can
* and press on.
* Other possible reasons for failure include bollixed file
* permissions on a standby server when they were okay on the primary,
* etc etc. There's not much we can do about that, so just remove what
* we can and press on.
*/
if (!destroy_tablespace_directories(xlrec->ts_id, true))
{

View File

@ -199,8 +199,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
/*
* We must take a lock on the target relation to protect against
* concurrent drop. It's not clear that AccessShareLock is strong
* enough, but we certainly need at least that much... otherwise,
* we might end up creating a pg_constraint entry referencing a
* enough, but we certainly need at least that much... otherwise, we
* might end up creating a pg_constraint entry referencing a
* nonexistent table.
*/
constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock, false);
@ -494,8 +494,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
* can skip this for internally generated triggers, since the name
* modification above should be sufficient.
*
* NOTE that this is cool only because we have AccessExclusiveLock on
* the relation, so the trigger set won't be changing underneath us.
* NOTE that this is cool only because we have AccessExclusiveLock on the
* relation, so the trigger set won't be changing underneath us.
*/
if (!isInternal)
{

View File

@ -1484,7 +1484,7 @@ static void
makeRangeConstructors(const char *name, Oid namespace,
Oid rangeOid, Oid subtype)
{
static const char * const prosrc[2] = {"range_constructor2",
static const char *const prosrc[2] = {"range_constructor2",
"range_constructor3"};
static const int pronargs[2] = {2, 3};

View File

@ -937,6 +937,7 @@ DropRole(DropRoleStmt *stmt)
if (object_access_hook)
{
ObjectAccessDrop drop_arg;
memset(&drop_arg, 0, sizeof(ObjectAccessDrop));
InvokeObjectAccessHook(OAT_DROP,
AuthIdRelationId, roleid, 0, &drop_arg);

View File

@ -322,13 +322,13 @@ get_rel_oids(Oid relid, const RangeVar *vacrel)
Oid relid;
/*
* Since we don't take a lock here, the relation might be gone,
* or the RangeVar might no longer refer to the OID we look up
* here. In the former case, VACUUM will do nothing; in the
* latter case, it will process the OID we looked up here, rather
* than the new one. Neither is ideal, but there's little practical
* alternative, since we're going to commit this transaction and
* begin a new one between now and then.
* Since we don't take a lock here, the relation might be gone, or the
* RangeVar might no longer refer to the OID we look up here. In the
* former case, VACUUM will do nothing; in the latter case, it will
* process the OID we looked up here, rather than the new one.
* Neither is ideal, but there's little practical alternative, since
* we're going to commit this transaction and begin a new one between
* now and then.
*/
relid = RangeVarGetRelid(vacrel, NoLock, false);

View File

@ -222,17 +222,17 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
*
* A corner case here is that if we scanned no pages at all because every
* page is all-visible, we should not update relpages/reltuples, because
* we have no new information to contribute. In particular this keeps
* us from replacing relpages=reltuples=0 (which means "unknown tuple
* we have no new information to contribute. In particular this keeps us
* from replacing relpages=reltuples=0 (which means "unknown tuple
* density") with nonzero relpages and reltuples=0 (which means "zero
* tuple density") unless there's some actual evidence for the latter.
*
* We do update relallvisible even in the corner case, since if the
* table is all-visible we'd definitely like to know that. But clamp
* the value to be not more than what we're setting relpages to.
* We do update relallvisible even in the corner case, since if the table
* is all-visible we'd definitely like to know that. But clamp the value
* to be not more than what we're setting relpages to.
*
* Also, don't change relfrozenxid if we skipped any pages, since then
* we don't know for certain that all tuples have a newer xmin.
* Also, don't change relfrozenxid if we skipped any pages, since then we
* don't know for certain that all tuples have a newer xmin.
*/
new_rel_pages = vacrelstats->rel_pages;
new_rel_tuples = vacrelstats->new_rel_tuples;
@ -277,9 +277,9 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
write_rate = 0;
if ((secs > 0) || (usecs > 0))
{
read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
read_rate = (double) BLCKSZ *VacuumPageMiss / (1024 * 1024) /
(secs + usecs / 1000000.0);
write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
write_rate = (double) BLCKSZ *VacuumPageDirty / (1024 * 1024) /
(secs + usecs / 1000000.0);
}
ereport(LOG,
@ -300,7 +300,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
VacuumPageHit,
VacuumPageMiss,
VacuumPageDirty,
read_rate,write_rate,
read_rate, write_rate,
pg_rusage_show(&ru0))));
}
}
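The rate expressions are pages-to-MiB-per-second arithmetic; the odd BLCKSZ *VacuumPageMiss spacing above is only pgindent treating BLCKSZ as a typedef name, so the multiplication is unchanged. A standalone check with made-up counter values:

#include <stdio.h>

#define BLCKSZ 8192

int
main(void)
{
	long		page_misses = 12345;	/* hypothetical counters */
	long		page_dirty = 6789;
	long		secs = 2;
	long		usecs = 500000;
	double		elapsed = secs + usecs / 1000000.0;

	double		read_rate = (double) BLCKSZ * page_misses / (1024 * 1024) / elapsed;
	double		write_rate = (double) BLCKSZ * page_dirty / (1024 * 1024) / elapsed;

	printf("avg read rate:  %.3f MiB/s\n", read_rate);
	printf("avg write rate: %.3f MiB/s\n", write_rate);
	return 0;
}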
@ -501,10 +501,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
vacrelstats->num_dead_tuples > 0)
{
/*
* Before beginning index vacuuming, we release any pin we may hold
* on the visibility map page. This isn't necessary for correctness,
* but we do it anyway to avoid holding the pin across a lengthy,
* unrelated operation.
* Before beginning index vacuuming, we release any pin we may
* hold on the visibility map page. This isn't necessary for
* correctness, but we do it anyway to avoid holding the pin
* across a lengthy, unrelated operation.
*/
if (BufferIsValid(vmbuffer))
{
@ -535,10 +535,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/*
* Pin the visibility map page in case we need to mark the page
* all-visible. In most cases this will be very cheap, because we'll
* already have the correct page pinned anyway. However, it's possible
* that (a) next_not_all_visible_block is covered by a different VM page
* than the current block or (b) we released our pin and did a cycle of
* index vacuuming.
* already have the correct page pinned anyway. However, it's
* possible that (a) next_not_all_visible_block is covered by a
* different VM page than the current block or (b) we released our pin
* and did a cycle of index vacuuming.
*/
visibilitymap_pin(onerel, blkno, &vmbuffer);
@ -873,10 +873,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
else if (!all_visible_according_to_vm)
{
/*
* It should never be the case that the visibility map page
* is set while the page-level bit is clear, but the reverse
* is allowed. Set the visibility map bit as well so that
* we get back in sync.
* It should never be the case that the visibility map page is
* set while the page-level bit is clear, but the reverse is
* allowed. Set the visibility map bit as well so that we get
* back in sync.
*/
visibilitymap_set(onerel, blkno, InvalidXLogRecPtr, vmbuffer,
visibility_cutoff_xid);

View File

@ -204,8 +204,8 @@ DefineVirtualRelation(RangeVar *relation, List *tlist, bool replace,
checkViewTupleDesc(descriptor, rel->rd_att);
/*
* The new options list replaces the existing options list, even
* if it's empty.
* The new options list replaces the existing options list, even if
* it's empty.
*/
atcmd = makeNode(AlterTableCmd);
atcmd->subtype = AT_ReplaceRelOptions;

View File

@ -66,6 +66,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
TIDBitmap *tbm;
TBMIterator *tbmiterator;
TBMIterateResult *tbmres;
#ifdef USE_PREFETCH
TBMIterator *prefetch_iterator;
#endif

View File

@ -176,8 +176,8 @@ StoreIndexTuple(TupleTableSlot *slot, IndexTuple itup, TupleDesc itupdesc)
* Note: we must use the tupdesc supplied by the AM in index_getattr, not
* the slot's tupdesc, in case the latter has different datatypes (this
* happens for btree name_ops in particular). They'd better have the same
* number of columns though, as well as being datatype-compatible which
* is something we can't so easily check.
* number of columns though, as well as being datatype-compatible which is
* something we can't so easily check.
*/
Assert(slot->tts_tupleDescriptor->natts == nindexatts);

View File

@ -419,8 +419,8 @@ MJCompare(MergeJoinState *mergestate)
/*
* If we had any NULL-vs-NULL inputs, we do not want to report that the
* tuples are equal. Instead, if result is still 0, change it to +1.
* This will result in advancing the inner side of the join.
* tuples are equal. Instead, if result is still 0, change it to +1. This
* will result in advancing the inner side of the join.
*
* Likewise, if there was a constant-false joinqual, do not report
* equality. We have to check this as part of the mergequals, else the

View File

@ -950,8 +950,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
* If there are indices on the result relation, open them and save
* descriptors in the result relation info, so that we can add new
* index entries for the tuples we add/update. We need not do this
* for a DELETE, however, since deletion doesn't affect indexes.
* Also, inside an EvalPlanQual operation, the indexes might be open
* for a DELETE, however, since deletion doesn't affect indexes. Also,
* inside an EvalPlanQual operation, the indexes might be open
* already, since we share the resultrel state with the original
* query.
*/

View File

@ -1674,8 +1674,8 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan, ParamListInfo boundParams)
raw_parsetree_list = pg_parse_query(src);
/*
* Do parse analysis and rule rewrite for each raw parsetree, storing
* the results into unsaved plancache entries.
* Do parse analysis and rule rewrite for each raw parsetree, storing the
* results into unsaved plancache entries.
*/
plancache_list = NIL;
@ -1686,8 +1686,8 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan, ParamListInfo boundParams)
CachedPlanSource *plansource;
/*
* Create the CachedPlanSource before we do parse analysis, since
* it needs to see the unmodified raw parse tree.
* Create the CachedPlanSource before we do parse analysis, since it
* needs to see the unmodified raw parse tree.
*/
plansource = CreateCachedPlan(parsetree,
src,
@ -2335,9 +2335,9 @@ _SPI_make_plan_non_temp(SPIPlanPtr plan)
/*
* Reparent all the CachedPlanSources into the procedure context. In
* theory this could fail partway through due to the pallocs, but we
* don't care too much since both the procedure context and the executor
* context would go away on error.
* theory this could fail partway through due to the pallocs, but we don't
* care too much since both the procedure context and the executor context
* would go away on error.
*/
foreach(lc, plan->plancache_list)
{

View File

@ -316,8 +316,8 @@ ClientAuthentication(Port *port)
/*
* Get the authentication method to use for this frontend/database
* combination. Note: we do not parse the file at this point; this has
* already been done elsewhere. hba.c dropped an error message
* into the server logfile if parsing the hba config file failed.
* already been done elsewhere. hba.c dropped an error message into the
* server logfile if parsing the hba config file failed.
*/
hba_getauthmethod(port);
@ -1365,10 +1365,10 @@ pg_SSPI_recvauth(Port *port)
}
/*
* Overwrite the current context with the one we just received.
* If sspictx is NULL it was the first loop and we need to allocate
* a buffer for it. On subsequent runs, we can just overwrite the
* buffer contents since the size does not change.
* Overwrite the current context with the one we just received. If
* sspictx is NULL it was the first loop and we need to allocate a
* buffer for it. On subsequent runs, we can just overwrite the buffer
* contents since the size does not change.
*/
if (sspictx == NULL)
{

View File

@ -845,8 +845,8 @@ initialize_SSL(void)
{
/*
* Always ask for SSL client cert, but don't fail if it's not
* presented. We might fail such connections later, depending on
* what we find in pg_hba.conf.
* presented. We might fail such connections later, depending on what
* we find in pg_hba.conf.
*/
SSL_CTX_set_verify(SSL_context,
(SSL_VERIFY_PEER |

View File

@ -443,10 +443,9 @@ is_member(Oid userid, const char *role)
return false; /* if target role does not exist, say "no" */
/*
* See if user is directly or indirectly a member of role.
* For this purpose, a superuser is not considered to be automatically
* a member of the role, so group auth only applies to explicit
* membership.
* See if user is directly or indirectly a member of role. For this
* purpose, a superuser is not considered to be automatically a member of
* the role, so group auth only applies to explicit membership.
*/
return is_member_of_role_nosuper(userid, roleid);
}
@ -1293,6 +1292,7 @@ parse_hba_line(List *line, int line_num)
foreach(tokencell, tokens)
{
char *val;
token = lfirst(tokencell);
str = pstrdup(token->string);
@ -1397,9 +1397,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
else if (strcmp(name, "clientcert") == 0)
{
/*
* Since we require ctHostSSL, this really can never happen
* on non-SSL-enabled builds, so don't bother checking for
* USE_SSL.
* Since we require ctHostSSL, this really can never happen on
* non-SSL-enabled builds, so don't bother checking for USE_SSL.
*/
if (hbaline->conntype != ctHostSSL)
{
@ -1742,8 +1741,8 @@ load_hba(void)
{
/*
* Parse error in the file, so indicate there's a problem. NB: a
* problem in a line will free the memory for all previous lines as
* well!
* problem in a line will free the memory for all previous lines
* as well!
*/
MemoryContextReset(hbacxt);
new_parsed_lines = NIL;
@ -1761,9 +1760,9 @@ load_hba(void)
}
/*
* A valid HBA file must have at least one entry; else there's no way
* to connect to the postmaster. But only complain about this if we
* didn't already have parsing errors.
* A valid HBA file must have at least one entry; else there's no way to
* connect to the postmaster. But only complain about this if we didn't
* already have parsing errors.
*/
if (ok && new_parsed_lines == NIL)
{

View File

@ -1247,9 +1247,9 @@ internal_flush(void)
/*
* We drop the buffered data anyway so that processing can
* continue, even though we'll probably quit soon. We also
* set a flag that'll cause the next CHECK_FOR_INTERRUPTS
* to terminate the connection.
* continue, even though we'll probably quit soon. We also set a
* flag that'll cause the next CHECK_FOR_INTERRUPTS to terminate
* the connection.
*/
PqSendStart = PqSendPointer = 0;
ClientConnectionLost = 1;
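A compressed standalone sketch of the drop-and-flag pattern just described, with invented names: on an unrecoverable write failure the send buffer is emptied so processing can continue, and a flag arms the next safe point in the control flow to terminate the session.

#include <stdbool.h>
#include <stdio.h>

static int	send_start = 0;
static int	send_ptr = 0;
static bool connection_lost = false;

/* Called at the next safe point, like CHECK_FOR_INTERRUPTS. */
static void
check_for_interrupts(void)
{
	if (connection_lost)
		fprintf(stderr, "connection to client lost; would exit here\n");
}

static void
flush_sketch(bool write_failed)
{
	if (write_failed)
	{
		/* Drop buffered data and arm the flag; don't try to report the
		 * error to the client over the same dead connection. */
		send_start = send_ptr = 0;
		connection_lost = true;
		return;
	}
	send_start = send_ptr;		/* normal case: all bytes were written */
}

int
main(void)
{
	send_ptr = 100;				/* pretend 100 bytes are queued */
	flush_sketch(true);			/* simulate a failed write */
	check_for_interrupts();
	return 0;
}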

View File

@ -992,14 +992,14 @@ tbm_lossify(TIDBitmap *tbm)
}
/*
* With a big bitmap and small work_mem, it's possible that we cannot
* get under maxentries. Again, if that happens, we'd end up uselessly
* With a big bitmap and small work_mem, it's possible that we cannot get
* under maxentries. Again, if that happens, we'd end up uselessly
* calling tbm_lossify over and over. To prevent this from becoming a
* performance sink, force maxentries up to at least double the current
* number of entries. (In essence, we're admitting inability to fit
* within work_mem when we do this.) Note that this test will not fire
* if we broke out of the loop early; and if we didn't, the current
* number of entries is simply not reducible any further.
* within work_mem when we do this.) Note that this test will not fire if
* we broke out of the loop early; and if we didn't, the current number of
* entries is simply not reducible any further.
*/
if (tbm->nentries > tbm->maxentries / 2)
tbm->maxentries = Min(tbm->nentries, (INT_MAX - 1) / 2) * 2;
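The final clamp above is overflow-safe doubling: limiting to (INT_MAX - 1) / 2 before multiplying by 2 guarantees the product still fits in an int. A standalone check of the edge case:

#include <limits.h>
#include <stdio.h>

static int
min_int(int a, int b)
{
	return (a < b) ? a : b;
}

int
main(void)
{
	int			nentries = INT_MAX - 3;		/* pathological input */
	int			maxentries = min_int(nentries, (INT_MAX - 1) / 2) * 2;

	printf("nentries=%d -> maxentries=%d (no overflow)\n",
		   nentries, maxentries);
	return 0;
}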
@ -1011,8 +1011,8 @@ tbm_lossify(TIDBitmap *tbm)
static int
tbm_comparator(const void *left, const void *right)
{
BlockNumber l = (*((PagetableEntry * const *) left))->blockno;
BlockNumber r = (*((PagetableEntry * const *) right))->blockno;
BlockNumber l = (*((PagetableEntry *const *) left))->blockno;
BlockNumber r = (*((PagetableEntry *const *) right))->blockno;
if (l < r)
return -1;
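tbm_comparator follows the usual qsort contract (negative, zero, positive) and compares with < instead of subtracting, since subtraction could wrap for large block numbers. A self-contained equivalent over plain unsigned block numbers:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int BlockNumber;

static int
blockno_cmp(const void *left, const void *right)
{
	BlockNumber l = *(const BlockNumber *) left;
	BlockNumber r = *(const BlockNumber *) right;

	if (l < r)
		return -1;
	if (l > r)
		return 1;
	return 0;
}

int
main(void)
{
	BlockNumber blocks[] = {42, 7, 4000000000u, 0};

	qsort(blocks, 4, sizeof(BlockNumber), blockno_cmp);
	for (int i = 0; i < 4; i++)
		printf("%u\n", blocks[i]);
	return 0;
}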

View File

@ -65,8 +65,8 @@ geqo_selection(PlannerInfo *root, Chromosome *momma, Chromosome *daddy,
* one, when we can't.
*
* This code was observed to hang up in an infinite loop when the
* platform's implementation of erand48() was broken. We now always
* use our own version.
* platform's implementation of erand48() was broken. We now always use
* our own version.
*/
if (pool->size > 1)
{
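erand48() is specified as the 48-bit linear congruential generator X' = (0x5DEECE66D * X + 0xB) mod 2^48, with the state scaled into [0, 1). A self-contained version of that recurrence, the kind of thing worth carrying in-tree when the platform's copy cannot be trusted (seeding simplified to the conventional low-order bits):

#include <stdint.h>
#include <stdio.h>

static uint64_t state48 = 0x330E;	/* conventional low 16 seed bits */

static double
my_erand48(void)
{
	state48 = (0x5DEECE66DULL * state48 + 0xB) & ((1ULL << 48) - 1);
	return (double) state48 / (double) (1ULL << 48);
}

int
main(void)
{
	for (int i = 0; i < 3; i++)
		printf("%f\n", my_erand48());
	return 0;
}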

View File

@ -251,6 +251,7 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel,
}
break;
case RTE_SUBQUERY:
/*
* Subqueries don't support parameterized paths, so just go
* ahead and build their paths immediately.
@ -264,6 +265,7 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel,
set_values_size_estimates(root, rel);
break;
case RTE_CTE:
/*
* CTEs don't support parameterized paths, so just go ahead
* and build their paths immediately.
@ -574,8 +576,8 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
/*
* It is possible that constraint exclusion detected a contradiction
* within a child subquery, even though we didn't prove one above.
* If so, we can skip this child.
* within a child subquery, even though we didn't prove one above. If
* so, we can skip this child.
*/
if (IS_DUMMY_REL(childrel))
continue;
@ -1115,9 +1117,9 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
rel->subroot = subroot;
/*
* It's possible that constraint exclusion proved the subquery empty.
* If so, it's convenient to turn it back into a dummy path so that we
* will recognize appropriate optimizations at this level.
* It's possible that constraint exclusion proved the subquery empty. If
* so, it's convenient to turn it back into a dummy path so that we will
* recognize appropriate optimizations at this level.
*/
if (is_dummy_plan(rel->subplan))
{

View File

@ -3221,8 +3221,8 @@ compute_semi_anti_join_factors(PlannerInfo *root,
*
* Note: it is correct to use the inner rel's "rows" count here, even
* though we might later be considering a parameterized inner path with
* fewer rows. This is because we have included all the join clauses
* in the selectivity estimate.
* fewer rows. This is because we have included all the join clauses in
* the selectivity estimate.
*/
if (jselec > 0) /* protect against zero divide */
{
@ -3282,6 +3282,7 @@ has_indexed_join_quals(NestPath *joinpath)
break;
}
default:
/*
* If it's not a simple indexscan, it probably doesn't run quickly
* for zero rows out, even if it's a parameterized path using all
@ -3293,8 +3294,8 @@ has_indexed_join_quals(NestPath *joinpath)
/*
* Examine the inner path's param clauses. Any that are from the outer
* path must be found in the indexclauses list, either exactly or in an
* equivalent form generated by equivclass.c. Also, we must find at
* least one such clause, else it's a clauseless join which isn't fast.
* equivalent form generated by equivclass.c. Also, we must find at least
* one such clause, else it's a clauseless join which isn't fast.
*/
found_one = false;
foreach(lc, innerpath->param_info->ppi_clauses)

View File

@ -1217,9 +1217,9 @@ generate_join_implied_equalities_broken(PlannerInfo *root,
/*
* If we have to translate, just brute-force apply adjust_appendrel_attrs
* to all the RestrictInfos at once. This will result in returning
* RestrictInfos that are not listed in ec_derives, but there shouldn't
* be any duplication, and it's a sufficiently narrow corner case that
* we shouldn't sweat too much over it anyway.
* RestrictInfos that are not listed in ec_derives, but there shouldn't be
* any duplication, and it's a sufficiently narrow corner case that we
* shouldn't sweat too much over it anyway.
*/
if (inner_appinfo)
result = (List *) adjust_appendrel_attrs(root, (Node *) result,

View File

@ -246,24 +246,24 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
/*
* Identify the join clauses that can match the index. For the moment
* we keep them separate from the restriction clauses. Note that
* this finds only "loose" join clauses that have not been merged
* into EquivalenceClasses. Also, collect join OR clauses for later.
* we keep them separate from the restriction clauses. Note that this
* finds only "loose" join clauses that have not been merged into
* EquivalenceClasses. Also, collect join OR clauses for later.
*/
MemSet(&jclauseset, 0, sizeof(jclauseset));
match_join_clauses_to_index(root, rel, index,
&jclauseset, &joinorclauses);
/*
* Look for EquivalenceClasses that can generate joinclauses
* matching the index.
* Look for EquivalenceClasses that can generate joinclauses matching
* the index.
*/
MemSet(&eclauseset, 0, sizeof(eclauseset));
match_eclass_clauses_to_index(root, index, &eclauseset);
/*
* If we found any plain or eclass join clauses, decide what to
* do with 'em.
* If we found any plain or eclass join clauses, decide what to do
* with 'em.
*/
if (jclauseset.nonempty || eclauseset.nonempty)
consider_index_join_clauses(root, rel, index,
@ -366,19 +366,19 @@ consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel,
* We can always include any restriction clauses in the index clauses.
* However, it's not obvious which subsets of the join clauses are worth
* generating paths from, and it's unlikely that considering every
* possible subset is worth the cycles. Our current heuristic is based
* on the index columns, with the idea that later index columns are less
* possible subset is worth the cycles. Our current heuristic is based on
* the index columns, with the idea that later index columns are less
* useful than earlier ones; therefore it's unlikely to be worth trying
* combinations that would remove a clause from an earlier index column
* while adding one to a later column. Also, we know that all the
* eclass clauses for a particular column are redundant, so we should
* use only one of them. However, eclass clauses will always represent
* equality which is the strongest type of index constraint, so those
* are high-value and we should try every available combination when we
* have eclass clauses for more than one column. Furthermore, it's
* unlikely to be useful to combine an eclass clause with non-eclass
* clauses for the same index column. These considerations lead to the
* following heuristics:
* while adding one to a later column. Also, we know that all the eclass
* clauses for a particular column are redundant, so we should use only
* one of them. However, eclass clauses will always represent equality
* which is the strongest type of index constraint, so those are
* high-value and we should try every available combination when we have
* eclass clauses for more than one column. Furthermore, it's unlikely to
* be useful to combine an eclass clause with non-eclass clauses for the
* same index column. These considerations lead to the following
* heuristics:
*
* First, start with the restriction clauses, and add on all simple join
* clauses for column 1. If there are any such join clauses, generate
@ -542,17 +542,16 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
SAOP_PER_AM, ST_ANYSCAN);
/*
* Submit all the ones that can form plain IndexScan plans to add_path.
* (A plain IndexPath can represent either a plain IndexScan or an
* Submit all the ones that can form plain IndexScan plans to add_path. (A
* plain IndexPath can represent either a plain IndexScan or an
* IndexOnlyScan, but for our purposes here that distinction does not
* matter. However, some of the indexes might support only bitmap scans,
* and those we mustn't submit to add_path here.)
*
* Also, pick out the ones that are usable as bitmap scans. For that,
* we must discard indexes that don't support bitmap scans, and we
* also are only interested in paths that have some selectivity; we
* should discard anything that was generated solely for ordering
* purposes.
* Also, pick out the ones that are usable as bitmap scans. For that, we
* must discard indexes that don't support bitmap scans, and we also are
* only interested in paths that have some selectivity; we should discard
* anything that was generated solely for ordering purposes.
*/
foreach(lc, indexpaths)
{
@ -568,9 +567,9 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
}
/*
* If the index doesn't handle ScalarArrayOpExpr clauses natively,
* check to see if there are any such clauses, and if so generate
* bitmap scan paths relying on executor-managed ScalarArrayOpExpr.
* If the index doesn't handle ScalarArrayOpExpr clauses natively, check
* to see if there are any such clauses, and if so generate bitmap scan
* paths relying on executor-managed ScalarArrayOpExpr.
*/
if (!index->amsearcharray)
{
@ -658,19 +657,19 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
/*
* 1. Collect the index clauses into a single list.
*
* We build a list of RestrictInfo nodes for clauses to be used with
* this index, along with an integer list of the index column numbers
* (zero based) that each clause should be used with. The clauses are
* ordered by index key, so that the column numbers form a nondecreasing
* sequence. (This order is depended on by btree and possibly other
* places.) The lists can be empty, if the index AM allows that.
* We build a list of RestrictInfo nodes for clauses to be used with this
* index, along with an integer list of the index column numbers (zero
* based) that each clause should be used with. The clauses are ordered
* by index key, so that the column numbers form a nondecreasing sequence.
* (This order is depended on by btree and possibly other places.) The
* lists can be empty, if the index AM allows that.
*
* found_clause is set true only if there's at least one index clause;
* and if saop_control is SAOP_REQUIRE, it has to be a ScalarArrayOpExpr
* found_clause is set true only if there's at least one index clause; and
* if saop_control is SAOP_REQUIRE, it has to be a ScalarArrayOpExpr
* clause.
*
* We also build a Relids set showing which outer rels are required
* by the selected clauses.
* We also build a Relids set showing which outer rels are required by the
* selected clauses.
*/
index_clauses = NIL;
clause_columns = NIL;
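
The parallel-list convention described above can be pictured with a minimal standalone sketch (toy data; none of these names are the planner's own): each clause is paired with a zero-based column number, and the pairing must keep the column numbers nondecreasing.

#include <assert.h>
#include <stdio.h>

int
main(void)
{
    /* Parallel arrays standing in for index_clauses and clause_columns:
     * each clause is tagged with the zero-based index column it applies
     * to, and the column numbers must form a nondecreasing sequence. */
    const char *index_clauses[] = {"a = 1", "a < 10", "b = 'x'", "c > 0"};
    const int   clause_columns[] = {0, 0, 1, 2};
    const int   nclauses = 4;

    for (int i = 1; i < nclauses; i++)
        assert(clause_columns[i] >= clause_columns[i - 1]);

    for (int i = 0; i < nclauses; i++)
        printf("column %d: %s\n", clause_columns[i], index_clauses[i]);
    return 0;
}
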
@@ -706,8 +705,8 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
* If no clauses match the first index column, check for amoptionalkey
* restriction. We can't generate a scan over an index with
* amoptionalkey = false unless there's at least one index clause.
* (When working on columns after the first, this test cannot fail.
* It is always okay for columns after the first to not have any
* (When working on columns after the first, this test cannot fail. It
* is always okay for columns after the first to not have any
* clauses.)
*/
if (index_clauses == NIL && !index->amoptionalkey)
@@ -865,8 +864,8 @@ build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
/*
* Ignore partial indexes that do not match the query. If a partial
* index is marked predOK then we know it's OK. Otherwise, we have
* to test whether the added clauses are sufficient to imply the
* index is marked predOK then we know it's OK. Otherwise, we have to
* test whether the added clauses are sufficient to imply the
* predicate. If so, we can use the index in the current context.
*
* We set useful_predicate to true iff the predicate was proven using
@@ -904,8 +903,8 @@ build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
match_clauses_to_index(index, clauses, &clauseset);
/*
* If no matches so far, and the index predicate isn't useful,
* we don't want it.
* If no matches so far, and the index predicate isn't useful, we
* don't want it.
*/
if (!clauseset.nonempty && !useful_predicate)
continue;
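
The skip rule in this hunk boils down to a two-flag test. A minimal standalone sketch, with made-up index names and booleans standing in for clauseset.nonempty and useful_predicate:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the real planner structures. */
typedef struct
{
    const char *name;
    bool        has_matching_clauses;   /* clauseset.nonempty */
    bool        useful_predicate;       /* predicate proven with added quals */
} ToyIndex;

int
main(void)
{
    ToyIndex    indexes[] = {
        {"idx_a", true, false},
        {"idx_partial", false, true},
        {"idx_useless", false, false},
    };

    for (int i = 0; i < 3; i++)
    {
        /* Same skip rule as in the hunk above: an index with no matching
         * clauses is only worth keeping if its predicate is useful. */
        if (!indexes[i].has_matching_clauses && !indexes[i].useful_predicate)
            continue;
        printf("consider %s\n", indexes[i].name);
    }
    return 0;
}
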
@@ -1581,16 +1580,16 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
return false;
/*
* Check that all needed attributes of the relation are available from
* the index.
* Check that all needed attributes of the relation are available from the
* index.
*
* XXX this is overly conservative for partial indexes, since we will
* consider attributes involved in the index predicate as required even
* though the predicate won't need to be checked at runtime. (The same
* is true for attributes used only in index quals, if we are certain
* that the index is not lossy.) However, it would be quite expensive
* to determine that accurately at this point, so for now we take the
* easy way out.
* though the predicate won't need to be checked at runtime. (The same is
* true for attributes used only in index quals, if we are certain that
* the index is not lossy.) However, it would be quite expensive to
* determine that accurately at this point, so for now we take the easy
* way out.
*/
/*
@@ -2195,8 +2194,8 @@ match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys,
/*
* We allow any column of the index to match each pathkey; they
* don't have to match left-to-right as you might expect. This
* is correct for GiST, which is the sole existing AM supporting
* don't have to match left-to-right as you might expect. This is
* correct for GiST, which is the sole existing AM supporting
* amcanorderbyop. We might need different logic in future for
* other implementations.
*/
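
A minimal standalone sketch of that any-column matching rule (toy names, not the planner's data structures): each pathkey is searched against every index column rather than only the next one left-to-right.

#include <stdio.h>
#include <string.h>

int
main(void)
{
    /* Index columns and requested pathkeys, by name.  Each pathkey may
     * match any column, not just the next one in order. */
    const char *index_cols[] = {"x", "y", "z"};
    const char *pathkeys[] = {"z", "x"};

    for (int p = 0; p < 2; p++)
    {
        int         match = -1;

        for (int c = 0; c < 3; c++)
            if (strcmp(pathkeys[p], index_cols[c]) == 0)
                match = c;
        printf("pathkey %s -> index column %d\n", pathkeys[p], match);
    }
    return 0;
}
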
@@ -2393,8 +2392,8 @@ eclass_member_matches_indexcol(EquivalenceClass *ec, EquivalenceMember *em,
* If it's a btree index, we can reject it if its opfamily isn't
* compatible with the EC, since no clause generated from the EC could be
* used with the index. For non-btree indexes, we can't easily tell
* whether clauses generated from the EC could be used with the index,
* so don't check the opfamily. This might mean we return "true" for a
* whether clauses generated from the EC could be used with the index, so
* don't check the opfamily. This might mean we return "true" for a
* useless EC, so we have to recheck the results of
* generate_implied_equalities_for_indexcol; see
* match_eclass_clauses_to_index.
@@ -3300,9 +3299,9 @@ adjust_rowcompare_for_index(RowCompareExpr *clause,
/*
* See how many of the remaining columns match some index column in the
* same way. As in match_clause_to_indexcol(), the "other" side of
* any potential index condition is OK as long as it doesn't use Vars from
* the indexed relation.
* same way. As in match_clause_to_indexcol(), the "other" side of any
* potential index condition is OK as long as it doesn't use Vars from the
* indexed relation.
*/
matching_cols = 1;
largs_cell = lnext(list_head(clause->largs));
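
The counting loop that follows this comment can be sketched in isolation. Here col_matches is hypothetical data standing in for the per-column outcome of the match tests; counting stops at the first column that fails.

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    /* Whether each remaining column of the row comparison found a usable
     * index condition.  The first column matched by construction, so
     * matching_cols starts at 1. */
    bool        col_matches[] = {true, true, false, true};
    int         matching_cols = 1;

    for (int i = 0; i < 4 && col_matches[i]; i++)
        matching_cols++;

    printf("usable leading columns: %d\n", matching_cols);
    return 0;
}
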


@@ -219,8 +219,8 @@ try_nestloop_path(PlannerInfo *root,
JoinCostWorkspace workspace;
/*
* Check to see if proposed path is still parameterized, and reject if
* the parameterization wouldn't be sensible.
* Check to see if proposed path is still parameterized, and reject if the
* parameterization wouldn't be sensible.
*/
required_outer = calc_nestloop_required_outer(outer_path,
inner_path);
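
The shape of such a check can be sketched with toy bitmasks. The union/difference arithmetic is generic; the "reject" rule shown is a simplified stand-in for the real sensibility test, which is more involved.

#include <stdio.h>

typedef unsigned int Relids;    /* toy bitmap set: bit i = rel i */

int
main(void)
{
    Relids      outer_relids = 0x1;     /* rels scanned by the outer path */
    Relids      inner_relids = 0x2;
    Relids      outer_param = 0x4;      /* rels the outer path needs from above */
    Relids      inner_param = 0x1 | 0x4;        /* inner parameterized by rel 1 */

    /* The join can satisfy inner parameterization from its own outer side,
     * so only the remainder must still come from outside the join. */
    Relids      required_outer = outer_param | (inner_param & ~outer_relids);

    /* Toy rejection rule: a proposal parameterized by one of the rels it
     * joins itself can't be sensible. */
    if (required_outer & (outer_relids | inner_relids))
        printf("reject: not sensible\n");
    else
        printf("required_outer = 0x%x\n", required_outer);
    return 0;
}
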
@@ -292,8 +292,8 @@ try_mergejoin_path(PlannerInfo *root,
JoinCostWorkspace workspace;
/*
* Check to see if proposed path is still parameterized, and reject if
* the parameterization wouldn't be sensible.
* Check to see if proposed path is still parameterized, and reject if the
* parameterization wouldn't be sensible.
*/
required_outer = calc_non_nestloop_required_outer(outer_path,
inner_path);
@@ -371,8 +371,8 @@ try_hashjoin_path(PlannerInfo *root,
JoinCostWorkspace workspace;
/*
* Check to see if proposed path is still parameterized, and reject if
* the parameterization wouldn't be sensible.
* Check to see if proposed path is still parameterized, and reject if the
* parameterization wouldn't be sensible.
*/
required_outer = calc_non_nestloop_required_outer(outer_path,
inner_path);
@@ -582,8 +582,8 @@ sort_inner_and_outer(PlannerInfo *root,
* And now we can make the path.
*
* Note: it's possible that the cheapest paths will already be sorted
* properly. try_mergejoin_path will detect that case and suppress
* an explicit sort step, so we needn't do so here.
* properly. try_mergejoin_path will detect that case and suppress an
* explicit sort step, so we needn't do so here.
*/
try_mergejoin_path(root,
joinrel,
@@ -775,8 +775,8 @@ match_unsorted_outer(PlannerInfo *root,
/*
* Consider nestloop joins using this outer path and various
* available paths for the inner relation. We consider the
* cheapest-total paths for each available parameterization of
* the inner relation, including the unparameterized case.
* cheapest-total paths for each available parameterization of the
* inner relation, including the unparameterized case.
*/
ListCell *lc2;
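
Keeping only the cheapest-total inner path per parameterization is a min-per-key scan. A standalone sketch with made-up costs and small integer keys standing in for parameterizations (key 0 = unparameterized):

#include <stdio.h>

typedef struct
{
    int         param_key;  /* 0 = unparameterized */
    double      total_cost;
} ToyPath;

int
main(void)
{
    ToyPath     inner_paths[] = {
        {0, 500.0}, {1, 120.0}, {1, 90.0}, {2, 300.0}, {0, 450.0},
    };
    double      best[3] = {1e9, 1e9, 1e9};

    /* Keep only the cheapest-total path for each parameterization,
     * including the unparameterized case. */
    for (int i = 0; i < 5; i++)
        if (inner_paths[i].total_cost < best[inner_paths[i].param_key])
            best[inner_paths[i].param_key] = inner_paths[i].total_cost;

    for (int k = 0; k < 3; k++)
        printf("param %d: cheapest total = %.1f\n", k, best[k]);
    return 0;
}
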
@@ -847,8 +847,8 @@ match_unsorted_outer(PlannerInfo *root,
/*
* Generate a mergejoin on the basis of sorting the cheapest inner.
* Since a sort will be needed, only cheapest total cost matters. (But
* try_mergejoin_path will do the right thing if
* inner_cheapest_total is already correctly sorted.)
* try_mergejoin_path will do the right thing if inner_cheapest_total
* is already correctly sorted.)
*/
try_mergejoin_path(root,
joinrel,
@@ -873,9 +873,9 @@ match_unsorted_outer(PlannerInfo *root,
* mergejoin using a subset of the merge clauses. Here, we consider
* both cheap startup cost and cheap total cost.
*
* Currently we do not consider parameterized inner paths here.
* This interacts with decisions elsewhere that also discriminate
* against mergejoins with parameterized inputs; see comments in
* Currently we do not consider parameterized inner paths here. This
* interacts with decisions elsewhere that also discriminate against
* mergejoins with parameterized inputs; see comments in
* src/backend/optimizer/README.
*
* As we shorten the sortkey list, we should consider only paths that


@@ -95,8 +95,8 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
/*
* Find potentially interesting OR joinclauses. We can use any joinclause
* that is considered safe to move to this rel by the parameterized-path
* machinery, even though what we are going to do with it is not exactly
* a parameterized path.
* machinery, even though what we are going to do with it is not exactly a
* parameterized path.
*/
foreach(i, rel->joininfo)
{


@@ -1880,8 +1880,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
Assert(rte->rtekind == RTE_RELATION);
/*
* Sort clauses into best execution order. We do this first since the
* FDW might have more info than we do and wish to adjust the ordering.
* Sort clauses into best execution order. We do this first since the FDW
* might have more info than we do and wish to adjust the ordering.
*/
scan_clauses = order_qual_clauses(root, scan_clauses);
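
Ordering quals so that cheap ones run first is one plausible reading of "best execution order"; the real order_qual_clauses has its own cost model. A standalone sketch with made-up per-tuple costs:

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    const char *clause;
    double      cost;       /* estimated per-tuple evaluation cost */
} ToyQual;

static int
cmp_cost(const void *a, const void *b)
{
    const ToyQual *qa = (const ToyQual *) a;
    const ToyQual *qb = (const ToyQual *) b;

    if (qa->cost < qb->cost)
        return -1;
    if (qa->cost > qb->cost)
        return 1;
    return 0;
}

int
main(void)
{
    ToyQual     quals[] = {
        {"expensive_func(x)", 100.0},
        {"y = 42", 0.01},
        {"z ~ 'pattern'", 5.0},
    };

    /* Cheapest quals first, so costly ones run on fewer surviving rows. */
    qsort(quals, 3, sizeof(ToyQual), cmp_cost);

    for (int i = 0; i < 3; i++)
        printf("%s\n", quals[i].clause);
    return 0;
}
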
@@ -2523,9 +2523,9 @@ replace_nestloop_params_mutator(Node *node, PlannerInfo *root)
/*
* If not to be replaced, just return the PlaceHolderVar unmodified.
* We use bms_overlap as a cheap/quick test to see if the PHV might
* be evaluated in the outer rels, and then grab its PlaceHolderInfo
* to tell for sure.
* We use bms_overlap as a cheap/quick test to see if the PHV might be
* evaluated in the outer rels, and then grab its PlaceHolderInfo to
* tell for sure.
*/
if (!bms_overlap(phv->phrels, root->curOuterRels))
return node;
@@ -3690,13 +3690,12 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
{
/*
* If we are given a sort column number to match, only consider
* the single TLE at that position. It's possible that there
* is no such TLE, in which case fall through and generate a
* resjunk targetentry (we assume this must have happened in the
* parent plan as well). If there is a TLE but it doesn't match
* the pathkey's EC, we do the same, which is probably the wrong
* thing but we'll leave it to caller to complain about the
* mismatch.
* the single TLE at that position. It's possible that there is
* no such TLE, in which case fall through and generate a resjunk
* targetentry (we assume this must have happened in the parent
* plan as well). If there is a TLE but it doesn't match the
* pathkey's EC, we do the same, which is probably the wrong thing
* but we'll leave it to caller to complain about the mismatch.
*/
tle = get_tle_by_resno(tlist, reqColIdx[numsortkeys]);
if (tle)
@@ -3746,11 +3745,11 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
if (!tle)
{
/*
* No matching tlist item; look for a computable expression.
* Note that we treat Aggrefs as if they were variables; this
* is necessary when attempting to sort the output from an Agg
* node for use in a WindowFunc (since grouping_planner will
* have treated the Aggrefs as variables, too).
* No matching tlist item; look for a computable expression. Note
* that we treat Aggrefs as if they were variables; this is
* necessary when attempting to sort the output from an Agg node
* for use in a WindowFunc (since grouping_planner will have
* treated the Aggrefs as variables, too).
*/
Expr *sortexpr = NULL;
@@ -3769,7 +3768,8 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
continue;
/*
* Ignore child members unless they match the rel being sorted.
* Ignore child members unless they match the rel being
* sorted.
*/
if (em->em_is_child &&
!bms_equal(em->em_relids, relids))
@@ -3877,8 +3877,7 @@ find_ec_member_for_tle(EquivalenceClass *ec,
/*
* We shouldn't be trying to sort by an equivalence class that
* contains a constant, so no need to consider such cases any
* further.
* contains a constant, so no need to consider such cases any further.
*/
if (em->em_is_const)
continue;


@@ -192,9 +192,9 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars,
where_needed);
/*
* If we are creating PlaceHolderInfos, mark them with the
* correct maybe-needed locations. Otherwise, it's too late to
* change that.
* If we are creating PlaceHolderInfos, mark them with the correct
* maybe-needed locations. Otherwise, it's too late to change
* that.
*/
if (create_new_ph)
mark_placeholder_maybe_needed(root, phinfo, where_needed);


@@ -146,8 +146,8 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* Make a flattened version of the rangetable for faster access (this is
* OK because the rangetable won't change any more), and set up an
* empty array for indexing base relations.
* OK because the rangetable won't change any more), and set up an empty
* array for indexing base relations.
*/
setup_simple_rel_arrays(root);
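
Flattening a list into an array for O(1) access, as this comment describes for the rangetable, can be sketched in a few lines (toy list type, not PostgreSQL's List):

#include <stdio.h>
#include <stdlib.h>

typedef struct ListNode
{
    int         value;
    struct ListNode *next;
} ListNode;

int
main(void)
{
    /* A small linked list standing in for the rangetable. */
    ListNode    c = {30, NULL};
    ListNode    b = {20, &c};
    ListNode    a = {10, &b};
    ListNode   *head = &a;
    int         n = 3;

    /* Once the list is known not to change, copy the node pointers into
     * an array so lookups by index are O(1) instead of O(n). */
    ListNode  **arr = malloc(n * sizeof(ListNode *));
    int         i = 0;

    for (ListNode *p = head; p != NULL; p = p->next)
        arr[i++] = p;

    printf("element 2 = %d\n", arr[2]->value);
    free(arr);
    return 0;
}
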


@@ -766,9 +766,9 @@ inheritance_planner(PlannerInfo *root)
/*
* The rowMarks list might contain references to subquery RTEs, so
* make a copy that we can apply ChangeVarNodes to. (Fortunately,
* the executor doesn't need to see the modified copies --- we can
* just pass it the original rowMarks list.)
* make a copy that we can apply ChangeVarNodes to. (Fortunately, the
* executor doesn't need to see the modified copies --- we can just
* pass it the original rowMarks list.)
*/
subroot.rowMarks = (List *) copyObject(root->rowMarks);
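
The copy-before-mutate idea here is generic: work on a duplicate so the original stays valid for other consumers. A minimal sketch with a plain array standing in for the rowMarks list:

#include <stdio.h>
#include <string.h>

int
main(void)
{
    /* Original data another consumer (the executor, in the real code)
     * still needs in unmodified form. */
    int         row_marks[] = {1, 2, 3};
    int         copy[3];

    /* Mutate a copy, so in-place adjustments don't clobber the original. */
    memcpy(copy, row_marks, sizeof(row_marks));
    for (int i = 0; i < 3; i++)
        copy[i] += 100;

    printf("original: %d %d %d\n", row_marks[0], row_marks[1], row_marks[2]);
    printf("adjusted: %d %d %d\n", copy[0], copy[1], copy[2]);
    return 0;
}
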
@@ -784,10 +784,11 @@ inheritance_planner(PlannerInfo *root)
/*
* If this isn't the first child Query, generate duplicates of all
* subquery RTEs, and adjust Var numbering to reference the duplicates.
* To simplify the loop logic, we scan the original rtable not the
* copy just made by adjust_appendrel_attrs; that should be OK since
* subquery RTEs couldn't contain any references to the target rel.
* subquery RTEs, and adjust Var numbering to reference the
* duplicates. To simplify the loop logic, we scan the original rtable
* not the copy just made by adjust_appendrel_attrs; that should be OK
* since subquery RTEs couldn't contain any references to the target
* rel.
*/
if (final_rtable != NIL)
{
@@ -1317,18 +1318,17 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
need_sort_for_grouping = true;
/*
* Always override create_plan's tlist, so that we don't
* sort useless data from a "physical" tlist.
* Always override create_plan's tlist, so that we don't sort
* useless data from a "physical" tlist.
*/
need_tlist_eval = true;
}
/*
* create_plan returns a plan with just a "flat" tlist of
* required Vars. Usually we need to insert the sub_tlist as the
* tlist of the top plan node. However, we can skip that if we
* determined that whatever create_plan chose to return will be
* good enough.
* create_plan returns a plan with just a "flat" tlist of required
* Vars. Usually we need to insert the sub_tlist as the tlist of
* the top plan node. However, we can skip that if we determined
* that whatever create_plan chose to return will be good enough.
*/
if (need_tlist_eval)
{
@@ -2653,8 +2653,8 @@ make_subplanTargetList(PlannerInfo *root,
}
/*
* Otherwise, we must build a tlist containing all grouping columns,
* plus any other Vars mentioned in the targetlist and HAVING qual.
* Otherwise, we must build a tlist containing all grouping columns, plus
* any other Vars mentioned in the targetlist and HAVING qual.
*/
sub_tlist = NIL;
non_group_cols = NIL;
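
Splitting target entries into grouping columns and leftover expressions, as described above, reduces to a one-pass partition. A standalone sketch with hypothetical entries:

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    const char *expr;
    bool        is_grouping_col;
} ToyTle;

int
main(void)
{
    ToyTle      tlist[] = {
        {"dept", true},
        {"sum(salary)", false},
        {"region", true},
        {"count(*)", false},
    };

    /* Grouping columns go straight into the subplan tlist; everything
     * else is just remembered so its Vars can be pulled out later. */
    for (int i = 0; i < 4; i++)
    {
        if (tlist[i].is_grouping_col)
            printf("sub_tlist: %s\n", tlist[i].expr);
        else
            printf("non_group_cols: %s\n", tlist[i].expr);
    }
    return 0;
}
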
@@ -2705,8 +2705,8 @@ make_subplanTargetList(PlannerInfo *root,
else
{
/*
* Non-grouping column, so just remember the expression
* for later call to pull_var_clause. There's no need for
* Non-grouping column, so just remember the expression for
* later call to pull_var_clause. There's no need for
* pull_var_clause to examine the TargetEntry node itself.
*/
non_group_cols = lappend(non_group_cols, tle->expr);


@@ -1822,8 +1822,8 @@ process_sublinks_mutator(Node *node, process_sublinks_context *context)
}
/*
* Don't recurse into the arguments of an outer PHV or aggregate here.
* Any SubLinks in the arguments have to be dealt with at the outer query
* Don't recurse into the arguments of an outer PHV or aggregate here. Any
* SubLinks in the arguments have to be dealt with at the outer query
* level; they'll be handled when build_subplan collects the PHV or Aggref
* into the arguments to be passed down to the current subplan.
*/


@@ -332,6 +332,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
/*
* Now recursively process the pulled-up quals. Any inserted
* joins can get stacked onto either j->larg or j->rarg,
@@ -357,6 +358,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
/*
* Now recursively process the pulled-up quals. Any inserted
* joins can get stacked onto either j->larg or j->rarg,
@@ -384,6 +386,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
/*
* Now recursively process the pulled-up quals. Any inserted
* joins can get stacked onto either j->larg or j->rarg,
@@ -409,6 +412,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
/*
* Now recursively process the pulled-up quals. Any inserted
* joins can get stacked onto either j->larg or j->rarg,
@@ -448,11 +452,12 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
/*
* Now recursively process the pulled-up quals. Because
* we are underneath a NOT, we can't pull up sublinks
* that reference the left-hand stuff, but it's still
* okay to pull up sublinks referencing j->rarg.
* we are underneath a NOT, we can't pull up sublinks that
* reference the left-hand stuff, but it's still okay to
* pull up sublinks referencing j->rarg.
*/
j->quals = pull_up_sublinks_qual_recurse(root,
j->quals,
@@ -473,11 +478,12 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
/*
* Now recursively process the pulled-up quals. Because
* we are underneath a NOT, we can't pull up sublinks
* that reference the left-hand stuff, but it's still
* okay to pull up sublinks referencing j->rarg.
* we are underneath a NOT, we can't pull up sublinks that
* reference the left-hand stuff, but it's still okay to
* pull up sublinks referencing j->rarg.
*/
j->quals = pull_up_sublinks_qual_recurse(root,
j->quals,

Some files were not shown because too many files have changed in this diff.